-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/DocBook/kernel-api.tmpl1
-rw-r--r--Documentation/acpi-hotkey.txt2
-rw-r--r--Documentation/feature-removal-schedule.txt19
-rw-r--r--Documentation/fujitsu/frv/kernel-ABI.txt192
-rw-r--r--Documentation/input/joystick-parport.txt11
-rw-r--r--Documentation/kernel-parameters.txt34
-rw-r--r--Documentation/leds-class.txt71
-rw-r--r--Documentation/memory-barriers.txt1913
-rw-r--r--Documentation/networking/packet_mmap.txt2
-rw-r--r--Documentation/networking/tuntap.txt2
-rw-r--r--Documentation/pcmcia/driver-changes.txt6
-rw-r--r--Documentation/video4linux/CARDLIST.saa71345
-rw-r--r--Documentation/video4linux/et61x251.txt (renamed from Documentation/usb/et61x251.txt)0
-rw-r--r--Documentation/video4linux/ibmcam.txt (renamed from Documentation/usb/ibmcam.txt)2
-rw-r--r--Documentation/video4linux/ov511.txt (renamed from Documentation/usb/ov511.txt)11
-rw-r--r--Documentation/video4linux/se401.txt (renamed from Documentation/usb/se401.txt)0
-rw-r--r--Documentation/video4linux/sn9c102.txt (renamed from Documentation/usb/sn9c102.txt)16
-rw-r--r--Documentation/video4linux/stv680.txt (renamed from Documentation/usb/stv680.txt)26
-rw-r--r--Documentation/video4linux/w9968cf.txt (renamed from Documentation/usb/w9968cf.txt)36
-rw-r--r--Documentation/video4linux/zc0301.txt (renamed from Documentation/usb/zc0301.txt)0
-rw-r--r--MAINTAINERS6
-rw-r--r--arch/alpha/kernel/alpha_ksyms.c2
-rw-r--r--arch/alpha/kernel/core_marvel.c2
-rw-r--r--arch/alpha/kernel/setup.c18
-rw-r--r--arch/arm/Kconfig10
-rw-r--r--arch/arm/Kconfig-nommu44
-rw-r--r--arch/arm/Makefile9
-rw-r--r--arch/arm/boot/compressed/head.S106
-rw-r--r--arch/arm/common/sharpsl_pm.c10
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/head-common.S217
-rw-r--r--arch/arm/kernel/head-nommu.S83
-rw-r--r--arch/arm/kernel/head.S207
-rw-r--r--arch/arm/kernel/process.c1
-rw-r--r--arch/arm/kernel/signal.h2
-rw-r--r--arch/arm/kernel/traps.c9
-rw-r--r--arch/arm/mach-pxa/corgi.c11
-rw-r--r--arch/arm/mach-pxa/spitz.c11
-rw-r--r--arch/arm/mach-pxa/tosa.c9
-rw-r--r--arch/arm/mm/proc-xsc3.S1
-rw-r--r--arch/arm26/kernel/armksyms.c2
-rw-r--r--arch/frv/kernel/frv_ksyms.c2
-rw-r--r--arch/h8300/kernel/h8300_ksyms.c2
-rw-r--r--arch/i386/kernel/apic.c22
-rw-r--r--arch/i386/kernel/cpu/mcheck/mce.c4
-rw-r--r--arch/i386/kernel/crash.c2
-rw-r--r--arch/i386/kernel/io_apic.c2
-rw-r--r--arch/i386/kernel/process.c1
-rw-r--r--arch/i386/kernel/setup.c18
-rw-r--r--arch/i386/kernel/syscall_table.S1
-rw-r--r--arch/i386/kernel/traps.c2
-rw-r--r--arch/i386/kernel/vsyscall-sigreturn.S2
-rw-r--r--arch/ia64/kernel/palinfo.c8
-rw-r--r--arch/ia64/kernel/time.c2
-rw-r--r--arch/ia64/kernel/topology.c367
-rw-r--r--arch/m68k/kernel/m68k_ksyms.c1
-rw-r--r--arch/m68knommu/kernel/m68k_ksyms.c2
-rw-r--r--arch/mips/Kconfig6
-rw-r--r--arch/mips/kernel/Makefile2
-rw-r--r--arch/mips/kernel/i8253.c28
-rw-r--r--arch/mips/kernel/process.c1
-rw-r--r--arch/powerpc/kernel/crash_dump.c4
-rw-r--r--arch/powerpc/kernel/lparcfg.c31
-rw-r--r--arch/powerpc/kernel/process.c1
-rw-r--r--arch/powerpc/kernel/rtas.c12
-rw-r--r--arch/powerpc/kernel/setup-common.c24
-rw-r--r--arch/powerpc/kernel/setup_32.c6
-rw-r--r--arch/powerpc/kernel/setup_64.c10
-rw-r--r--arch/powerpc/kernel/systbl.S1
-rw-r--r--arch/powerpc/kernel/traps.c9
-rw-r--r--arch/powerpc/kernel/vdso32/sigtramp.S2
-rw-r--r--arch/powerpc/kernel/vdso64/sigtramp.S2
-rw-r--r--arch/powerpc/mm/fault.c6
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_sys.c40
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ads.c40
-rw-r--r--arch/powerpc/platforms/cell/spu_callbacks.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c1
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c62
-rw-r--r--arch/powerpc/platforms/pseries/eeh_driver.c19
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c30
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S100
-rw-r--r--arch/powerpc/platforms/pseries/hvconsole.c6
-rw-r--r--arch/powerpc/platforms/pseries/hvcserver.c22
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c31
-rw-r--r--arch/powerpc/platforms/pseries/setup.c2
-rw-r--r--arch/powerpc/platforms/pseries/vio.c4
-rw-r--r--arch/powerpc/platforms/pseries/xics.c8
-rw-r--r--arch/s390/kernel/smp.c6
-rw-r--r--arch/sh/kernel/cpu/init.c2
-rw-r--r--arch/sh/kernel/setup.c2
-rw-r--r--arch/sparc/kernel/systbls.S4
-rw-r--r--arch/sparc64/defconfig14
-rw-r--r--arch/sparc64/kernel/smp.c9
-rw-r--r--arch/sparc64/kernel/sys32.S2
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c8
-rw-r--r--arch/sparc64/kernel/systbls.S8
-rw-r--r--arch/sparc64/mm/fault.c6
-rw-r--r--arch/sparc64/mm/hugetlbpage.c7
-rw-r--r--arch/um/Kconfig3
-rw-r--r--arch/um/Makefile7
-rw-r--r--arch/um/Makefile-x86_642
-rw-r--r--arch/um/drivers/daemon_kern.c13
-rw-r--r--arch/um/drivers/harddog_kern.c8
-rw-r--r--arch/um/drivers/hostaudio_kern.c10
-rw-r--r--arch/um/drivers/mcast_kern.c13
-rw-r--r--arch/um/drivers/mconsole_kern.c140
-rw-r--r--arch/um/drivers/pcap_kern.c13
-rw-r--r--arch/um/drivers/slip_kern.c13
-rw-r--r--arch/um/drivers/slirp_kern.c15
-rw-r--r--arch/um/drivers/ubd_kern.c2
-rw-r--r--arch/um/include/kern_util.h6
-rw-r--r--arch/um/include/line.h18
-rw-r--r--arch/um/include/mem_user.h1
-rw-r--r--arch/um/include/os.h10
-rw-r--r--arch/um/include/sysdep-i386/checksum.h5
-rw-r--r--arch/um/include/sysdep-i386/ptrace.h5
-rw-r--r--arch/um/include/sysdep-i386/tls.h32
-rw-r--r--arch/um/include/sysdep-x86_64/tls.h29
-rw-r--r--arch/um/include/user_util.h5
-rw-r--r--arch/um/kernel/exec_kern.c16
-rw-r--r--arch/um/kernel/mem.c2
-rw-r--r--arch/um/kernel/process_kern.c26
-rw-r--r--arch/um/kernel/ptrace.c44
-rw-r--r--arch/um/kernel/skas/process_kern.c11
-rw-r--r--arch/um/kernel/syscall_kern.c4
-rw-r--r--arch/um/kernel/trap_kern.c8
-rw-r--r--arch/um/kernel/tt/process_kern.c10
-rw-r--r--arch/um/os-Linux/Makefile7
-rw-r--r--arch/um/os-Linux/drivers/ethertap_kern.c13
-rw-r--r--arch/um/os-Linux/drivers/tuntap_kern.c13
-rw-r--r--arch/um/os-Linux/mem.c27
-rw-r--r--arch/um/os-Linux/process.c44
-rw-r--r--arch/um/os-Linux/start_up.c20
-rw-r--r--arch/um/os-Linux/sys-i386/Makefile2
-rw-r--r--arch/um/os-Linux/sys-i386/tls.c33
-rw-r--r--arch/um/os-Linux/tls.c76
-rw-r--r--arch/um/scripts/Makefile.rules26
-rw-r--r--arch/um/scripts/Makefile.unmap22
-rw-r--r--arch/um/sys-i386/Makefile23
-rw-r--r--arch/um/sys-i386/ptrace.c45
-rw-r--r--arch/um/sys-i386/ptrace_user.c10
-rw-r--r--arch/um/sys-i386/signal.c48
-rw-r--r--arch/um/sys-i386/sys_call_table.S2
-rw-r--r--arch/um/sys-i386/syscalls.c16
-rw-r--r--arch/um/sys-i386/tls.c384
-rw-r--r--arch/um/sys-x86_64/Makefile34
-rw-r--r--arch/um/sys-x86_64/tls.c14
-rw-r--r--arch/x86_64/ia32/vsyscall-sigreturn.S23
-rw-r--r--arch/x86_64/kernel/apic.c14
-rw-r--r--arch/x86_64/kernel/early_printk.c2
-rw-r--r--arch/x86_64/kernel/mce.c4
-rw-r--r--arch/x86_64/kernel/pmtimer.c2
-rw-r--r--arch/x86_64/kernel/setup.c2
-rw-r--r--arch/x86_64/kernel/setup64.c4
-rw-r--r--arch/x86_64/kernel/smpboot.c2
-rw-r--r--arch/x86_64/kernel/time.c4
-rw-r--r--arch/x86_64/kernel/traps.c4
-rw-r--r--arch/x86_64/kernel/x8664_ksyms.c2
-rw-r--r--arch/x86_64/mm/fault.c2
-rw-r--r--arch/xtensa/kernel/xtensa_ksyms.c2
-rw-r--r--block/Kconfig8
-rw-r--r--block/elevator.c2
-rw-r--r--block/genhd.c103
-rw-r--r--block/ll_rw_blk.c2
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile6
-rw-r--r--drivers/acpi/ec.c4
-rw-r--r--drivers/block/amiflop.c1
-rw-r--r--drivers/bluetooth/bluecard_cs.c119
-rw-r--r--drivers/bluetooth/bt3c_cs.c130
-rw-r--r--drivers/bluetooth/btuart_cs.c130
-rw-r--r--drivers/bluetooth/dtl1_cs.c120
-rw-r--r--drivers/char/hvcs.c2
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c18
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c8
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c80
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c85
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c61
-rw-r--r--drivers/char/istallion.c32
-rw-r--r--drivers/char/keyboard.c118
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c121
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c133
-rw-r--r--drivers/char/pcmcia/synclink_cs.c116
-rw-r--r--drivers/char/stallion.c46
-rw-r--r--drivers/char/tty_io.c2
-rw-r--r--drivers/char/vt.c4
-rw-r--r--drivers/char/watchdog/Kconfig7
-rw-r--r--drivers/char/watchdog/Makefile1
-rw-r--r--drivers/char/watchdog/at91_wdt.c228
-rw-r--r--drivers/char/watchdog/pcwd.c137
-rw-r--r--drivers/char/watchdog/pcwd_usb.c3
-rw-r--r--drivers/edac/Kconfig2
-rw-r--r--drivers/hwmon/hdaps.c37
-rw-r--r--drivers/ide/ide-disk.c3
-rw-r--r--drivers/ide/ide-taskfile.c8
-rw-r--r--drivers/ide/legacy/ide-cs.c127
-rw-r--r--drivers/ieee1394/sbp2.c32
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/hw/ipath/Kconfig16
-rw-r--r--drivers/infiniband/hw/ipath/Makefile36
-rw-r--r--drivers/infiniband/hw/ipath/ipath_common.h616
-rw-r--r--drivers/infiniband/hw/ipath/ipath_cq.c295
-rw-r--r--drivers/infiniband/hw/ipath/ipath_debug.h96
-rw-r--r--drivers/infiniband/hw/ipath/ipath_diag.c379
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c1983
-rw-r--r--drivers/infiniband/hw/ipath/ipath_eeprom.c613
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c1910
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c605
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ht400.c1586
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c951
-rw-r--r--drivers/infiniband/hw/ipath/ipath_intr.c841
-rw-r--r--drivers/infiniband/hw/ipath/ipath_kernel.h884
-rw-r--r--drivers/infiniband/hw/ipath/ipath_keys.c236
-rw-r--r--drivers/infiniband/hw/ipath/ipath_layer.c1515
-rw-r--r--drivers/infiniband/hw/ipath/ipath_layer.h181
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mad.c1352
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c383
-rw-r--r--drivers/infiniband/hw/ipath/ipath_pe800.c1247
-rw-r--r--drivers/infiniband/hw/ipath/ipath_qp.c913
-rw-r--r--drivers/infiniband/hw/ipath/ipath_rc.c1857
-rw-r--r--drivers/infiniband/hw/ipath/ipath_registers.h446
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c552
-rw-r--r--drivers/infiniband/hw/ipath/ipath_srq.c273
-rw-r--r--drivers/infiniband/hw/ipath/ipath_stats.c303
-rw-r--r--drivers/infiniband/hw/ipath/ipath_sysfs.c778
-rw-r--r--drivers/infiniband/hw/ipath/ipath_uc.c645
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ud.c621
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c207
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c1222
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.h697
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs_mcast.c333
-rw-r--r--drivers/infiniband/hw/ipath/ipath_wc_x86_64.c157
-rw-r--r--drivers/infiniband/hw/ipath/ips_common.h263
-rw-r--r--drivers/infiniband/hw/ipath/verbs_debug.h107
-rw-r--r--drivers/input/evbug.c3
-rw-r--r--drivers/input/evdev.c6
-rw-r--r--drivers/input/gameport/gameport.c30
-rw-r--r--drivers/input/gameport/ns558.c13
-rw-r--r--drivers/input/input.c422
-rw-r--r--drivers/input/joydev.c6
-rw-r--r--drivers/input/joystick/amijoy.c11
-rw-r--r--drivers/input/joystick/db9.c13
-rw-r--r--drivers/input/joystick/gamecon.c96
-rw-r--r--drivers/input/joystick/iforce/iforce-ff.c24
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c2
-rw-r--r--drivers/input/joystick/iforce/iforce.h5
-rw-r--r--drivers/input/joystick/turbografx.c13
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/atkbd.c24
-rw-r--r--drivers/input/keyboard/corgikbd.c35
-rw-r--r--drivers/input/keyboard/hil_kbd.c9
-rw-r--r--drivers/input/keyboard/spitzkbd.c10
-rw-r--r--drivers/input/misc/pcspkr.c27
-rw-r--r--drivers/input/misc/uinput.c14
-rw-r--r--drivers/input/mouse/hil_ptr.c7
-rw-r--r--drivers/input/mouse/psmouse-base.c38
-rw-r--r--drivers/input/mouse/synaptics.c18
-rw-r--r--drivers/input/mousedev.c6
-rw-r--r--drivers/input/power.c3
-rw-r--r--drivers/input/serio/hil_mlc.c3
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h26
-rw-r--r--drivers/input/serio/libps2.c10
-rw-r--r--drivers/input/serio/parkbd.c3
-rw-r--r--drivers/input/serio/rpckbd.c3
-rw-r--r--drivers/input/serio/serio.c48
-rw-r--r--drivers/input/serio/serio_raw.c29
-rw-r--r--drivers/input/tsdev.c6
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c185
-rw-r--r--drivers/isdn/hisax/avma1_cs.c182
-rw-r--r--drivers/isdn/hisax/elsa_cs.c112
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c143
-rw-r--r--drivers/isdn/hisax/teles_cs.c121
-rw-r--r--drivers/isdn/sc/ioctl.c9
-rw-r--r--drivers/leds/Kconfig77
-rw-r--r--drivers/leds/Makefile16
-rw-r--r--drivers/leds/led-class.c167
-rw-r--r--drivers/leds/led-core.c25
-rw-r--r--drivers/leds/led-triggers.c239
-rw-r--r--drivers/leds/leds-corgi.c121
-rw-r--r--drivers/leds/leds-ixp4xx-gpio.c215
-rw-r--r--drivers/leds/leds-locomo.c95
-rw-r--r--drivers/leds/leds-spitz.c125
-rw-r--r--drivers/leds/leds-tosa.c131
-rw-r--r--drivers/leds/leds.h44
-rw-r--r--drivers/leds/ledtrig-ide-disk.c62
-rw-r--r--drivers/leds/ledtrig-timer.c170
-rw-r--r--drivers/md/dm-target.c3
-rw-r--r--drivers/md/md.c8
-rw-r--r--drivers/md/raid1.c19
-rw-r--r--drivers/md/raid10.c6
-rw-r--r--drivers/md/raid5.c34
-rw-r--r--drivers/md/raid6main.c31
-rw-r--r--drivers/media/Kconfig24
-rw-r--r--drivers/media/dvb/bt8xx/Kconfig1
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c12
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c18
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.h2
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c54
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.c47
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h2
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x-fe.c5
-rw-r--r--drivers/media/dvb/frontends/Kconfig12
-rw-r--r--drivers/media/dvb/frontends/tda1004x.c12
-rw-r--r--drivers/media/dvb/ttpci/av7110.c8
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c2
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c13
-rw-r--r--drivers/media/dvb/ttpci/budget-core.c78
-rw-r--r--drivers/media/dvb/ttpci/budget-patch.c24
-rw-r--r--drivers/media/dvb/ttpci/budget.h13
-rw-r--r--drivers/media/video/Kconfig233
-rw-r--r--drivers/media/video/Makefile6
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c170
-rw-r--r--drivers/media/video/bt8xx/bttv-vbi.c2
-rw-r--r--drivers/media/video/cpia.c13
-rw-r--r--drivers/media/video/cpia2/cpia2.h2
-rw-r--r--drivers/media/video/cpia_pp.c2
-rw-r--r--drivers/media/video/cx25840/cx25840-audio.c3
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c24
-rw-r--r--drivers/media/video/cx25840/cx25840-core.h (renamed from drivers/media/video/cx25840/cx25840.h)46
-rw-r--r--drivers/media/video/cx25840/cx25840-firmware.c15
-rw-r--r--drivers/media/video/cx25840/cx25840-vbi.c9
-rw-r--r--drivers/media/video/cx88/Kconfig15
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c4
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c33
-rw-r--r--drivers/media/video/et61x251/Kconfig14
-rw-r--r--drivers/media/video/ir-kbd-i2c.c3
-rw-r--r--drivers/media/video/msp3400-driver.c91
-rw-r--r--drivers/media/video/msp3400-driver.h6
-rw-r--r--drivers/media/video/msp3400-kthreads.c121
-rw-r--r--drivers/media/video/pwc/Kconfig28
-rw-r--r--drivers/media/video/saa7115.c65
-rw-r--r--drivers/media/video/saa7127.c43
-rw-r--r--drivers/media/video/saa7134/Kconfig1
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c66
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c4
-rw-r--r--drivers/media/video/saa7134/saa7134.h1
-rw-r--r--drivers/media/video/sn9c102/Kconfig11
-rw-r--r--drivers/media/video/tuner-core.c12
-rw-r--r--drivers/media/video/tvaudio.c15
-rw-r--r--drivers/media/video/tveeprom.c6
-rw-r--r--drivers/media/video/tvp5150.c140
-rw-r--r--drivers/media/video/upd64031a.c286
-rw-r--r--drivers/media/video/upd64083.c262
-rw-r--r--drivers/media/video/usbvideo/Kconfig38
-rw-r--r--drivers/media/video/usbvideo/Makefile8
-rw-r--r--drivers/media/video/v4l2-common.c8
-rw-r--r--drivers/media/video/video-buf.c14
-rw-r--r--drivers/media/video/wm8739.c355
-rw-r--r--drivers/media/video/zc0301/Kconfig11
-rw-r--r--drivers/mmc/Kconfig11
-rw-r--r--drivers/mmc/Makefile5
-rw-r--r--drivers/mmc/au1xmmc.c19
-rw-r--r--drivers/mmc/mmc.c19
-rw-r--r--drivers/mmc/mmci.c4
-rw-r--r--drivers/mmc/omap.c1226
-rw-r--r--drivers/mmc/omap.h55
-rw-r--r--drivers/mmc/pxamci.c24
-rw-r--r--drivers/mmc/sdhci.c6
-rw-r--r--drivers/mmc/wbsd.c9
-rw-r--r--drivers/mtd/chips/Kconfig21
-rw-r--r--drivers/mtd/chips/amd_flash.c4
-rw-r--r--drivers/mtd/chips/jedec_probe.c19
-rw-r--r--drivers/mtd/chips/sharp.c7
-rw-r--r--drivers/mtd/cmdlinepart.c7
-rw-r--r--drivers/mtd/devices/blkmtd.c13
-rw-r--r--drivers/mtd/devices/block2mtd.c13
-rw-r--r--drivers/mtd/devices/doc2000.c37
-rw-r--r--drivers/mtd/devices/lart.c10
-rw-r--r--drivers/mtd/devices/m25p80.c2
-rw-r--r--drivers/mtd/devices/ms02-nv.c2
-rw-r--r--drivers/mtd/inftlcore.c7
-rw-r--r--drivers/mtd/maps/alchemy-flash.c4
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c2
-rw-r--r--drivers/mtd/maps/dbox2-flash.c2
-rw-r--r--drivers/mtd/maps/dilnetpc.c4
-rw-r--r--drivers/mtd/maps/dmv182.c2
-rw-r--r--drivers/mtd/maps/h720x-flash.c2
-rw-r--r--drivers/mtd/maps/netsc520.c4
-rw-r--r--drivers/mtd/maps/nettel.c3
-rw-r--r--drivers/mtd/maps/ocotea.c6
-rw-r--r--drivers/mtd/maps/pci.c3
-rw-r--r--drivers/mtd/maps/pcmciamtd.c117
-rw-r--r--drivers/mtd/maps/redwood.c3
-rw-r--r--drivers/mtd/maps/sbc8240.c8
-rw-r--r--drivers/mtd/maps/sc520cdp.c2
-rw-r--r--drivers/mtd/maps/scx200_docflash.c2
-rw-r--r--drivers/mtd/maps/sharpsl-flash.c4
-rw-r--r--drivers/mtd/maps/ts5500_flash.c2
-rw-r--r--drivers/mtd/maps/uclinux.c2
-rw-r--r--drivers/mtd/maps/vmax301.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c32
-rw-r--r--drivers/mtd/mtdblock.c14
-rw-r--r--drivers/mtd/mtdcore.c45
-rw-r--r--drivers/mtd/nand/Kconfig17
-rw-r--r--drivers/mtd/nand/au1550nd.c4
-rw-r--r--drivers/mtd/nand/nand_base.c26
-rw-r--r--drivers/mtd/redboot.c6
-rw-r--r--drivers/net/3c59x.c33
-rw-r--r--drivers/net/8139cp.c12
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/arcnet/arcnet.c3
-rw-r--r--drivers/net/arcnet/com90xx.c4
-rw-r--r--drivers/net/b44.c3
-rw-r--r--drivers/net/chelsio/sge.c3
-rw-r--r--drivers/net/e1000/e1000_main.c3
-rw-r--r--drivers/net/eql.c3
-rw-r--r--drivers/net/ibmveth.c30
-rw-r--r--drivers/net/irda/sa1100_ir.c3
-rw-r--r--drivers/net/ne2k-pci.c4
-rw-r--r--drivers/net/netconsole.c2
-rw-r--r--drivers/net/ns83820.c3
-rw-r--r--drivers/net/pcmcia/3c574_cs.c115
-rw-r--r--drivers/net/pcmcia/3c589_cs.c122
-rw-r--r--drivers/net/pcmcia/axnet_cs.c126
-rw-r--r--drivers/net/pcmcia/com20020_cs.c127
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c166
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c121
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c126
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c161
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c235
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c187
-rw-r--r--drivers/net/starfire.c3
-rw-r--r--drivers/net/tg3.c72
-rw-r--r--drivers/net/tokenring/Kconfig2
-rw-r--r--drivers/net/tokenring/abyss.c3
-rw-r--r--drivers/net/tokenring/madgemc.c3
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/airo_cs.c158
-rw-r--r--drivers/net/wireless/atmel_cs.c162
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c198
-rw-r--r--drivers/net/wireless/ipw2200.c9
-rw-r--r--drivers/net/wireless/netwave_cs.c127
-rw-r--r--drivers/net/wireless/orinoco_cs.c187
-rw-r--r--drivers/net/wireless/ray_cs.c279
-rw-r--r--drivers/net/wireless/ray_cs.h2
-rw-r--r--drivers/net/wireless/spectrum_cs.c173
-rw-r--r--drivers/net/wireless/wavelan_cs.c189
-rw-r--r--drivers/net/wireless/wavelan_cs.p.h6
-rw-r--r--drivers/net/wireless/wl3501.h1
-rw-r--r--drivers/net/wireless/wl3501_cs.c178
-rw-r--r--drivers/net/yellowfin.c3
-rw-r--r--drivers/parport/parport_cs.c129
-rw-r--r--drivers/pcmcia/Kconfig7
-rw-r--r--drivers/pcmcia/Makefile3
-rw-r--r--drivers/pcmcia/at91_cf.c365
-rw-r--r--drivers/pcmcia/cistpl.c1
-rw-r--r--drivers/pcmcia/cs.c43
-rw-r--r--drivers/pcmcia/cs_internal.h19
-rw-r--r--drivers/pcmcia/ds.c249
-rw-r--r--drivers/pcmcia/ds_internal.h4
-rw-r--r--drivers/pcmcia/i82092.c1
-rw-r--r--drivers/pcmcia/i82365.c1
-rw-r--r--drivers/pcmcia/pcmcia_compat.c65
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c81
-rw-r--r--drivers/pcmcia/pcmcia_resource.c228
-rw-r--r--drivers/pcmcia/pd6729.c1
-rw-r--r--drivers/pcmcia/rsrc_mgr.c5
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c41
-rw-r--r--drivers/pcmcia/sa1100_cerf.c1
-rw-r--r--drivers/pcmcia/socket_sysfs.c10
-rw-r--r--drivers/pcmcia/ti113x.h1
-rw-r--r--drivers/pcmcia/vrc4171_card.c12
-rw-r--r--drivers/pcmcia/vrc4173_cardu.c8
-rw-r--r--drivers/s390/block/dasd_erp.c8
-rw-r--r--drivers/s390/char/sclp_rw.c2
-rw-r--r--drivers/s390/char/tape_block.c13
-rw-r--r--drivers/s390/net/lcs.c13
-rw-r--r--drivers/scsi/ahci.c4
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic7xxx2
-rw-r--r--drivers/scsi/ata_piix.c4
-rw-r--r--drivers/scsi/ibmmca.c2
-rw-r--r--drivers/scsi/ibmvscsi/rpa_vscsi.c10
-rw-r--r--drivers/scsi/libata-core.c28
-rw-r--r--drivers/scsi/libata-scsi.c8
-rw-r--r--drivers/scsi/libata.h2
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c112
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c155
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c136
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.h8
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c127
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c124
-rw-r--r--drivers/serial/Kconfig27
-rw-r--r--drivers/serial/Makefile12
-rw-r--r--drivers/serial/jsm/jsm.h2
-rw-r--r--drivers/serial/jsm/jsm_driver.c2
-rw-r--r--drivers/serial/jsm/jsm_neo.c2
-rw-r--r--drivers/serial/jsm/jsm_tty.c29
-rw-r--r--drivers/serial/serial_cs.c229
-rw-r--r--drivers/telephony/ixj_pcmcia.c119
-rw-r--r--drivers/usb/host/sl811_cs.c119
-rw-r--r--drivers/usb/input/hid-input.c2
-rw-r--r--drivers/video/Kconfig12
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/backlight/Kconfig4
-rw-r--r--drivers/video/backlight/backlight.c84
-rw-r--r--drivers/video/backlight/corgi_bl.c124
-rw-r--r--drivers/video/backlight/hp680_bl.c139
-rw-r--r--drivers/video/cfbimgblt.c2
-rw-r--r--drivers/video/console/fbcon.c11
-rw-r--r--drivers/video/console/sticore.c4
-rw-r--r--drivers/video/fbmem.c2
-rw-r--r--drivers/video/pxafb.c8
-rw-r--r--drivers/video/radeonfb.c3167
-rw-r--r--drivers/video/stifb.c4
-rw-r--r--drivers/video/w100fb.c162
-rw-r--r--drivers/video/w100fb.h748
-rw-r--r--fs/Makefile2
-rw-r--r--fs/char_dev.c87
-rw-r--r--fs/cifs/CHANGES18
-rw-r--r--fs/cifs/Makefile2
-rw-r--r--fs/cifs/README7
-rw-r--r--fs/cifs/cifsencrypt.c42
-rw-r--r--fs/cifs/cifsfs.c5
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h11
-rw-r--r--fs/cifs/cifspdu.h13
-rw-r--r--fs/cifs/cifsproto.h15
-rw-r--r--fs/cifs/cifssmb.c135
-rw-r--r--fs/cifs/connect.c99
-rw-r--r--fs/cifs/dir.c7
-rw-r--r--fs/cifs/file.c94
-rw-r--r--fs/cifs/inode.c22
-rw-r--r--fs/cifs/link.c2
-rw-r--r--fs/cifs/misc.c46
-rw-r--r--fs/cifs/ntlmssp.c129
-rw-r--r--fs/cifs/ntlmssp.h2
-rw-r--r--fs/cifs/readdir.c7
-rw-r--r--fs/cifs/transport.c22
-rw-r--r--fs/dcache.c50
-rw-r--r--fs/direct-io.c3
-rw-r--r--fs/dquot.c6
-rw-r--r--fs/exec.c2
-rw-r--r--fs/fcntl.c3
-rw-r--r--fs/freevxfs/vxfs_olt.c9
-rw-r--r--fs/hfsplus/bnode.c6
-rw-r--r--fs/hfsplus/btree.c3
-rw-r--r--fs/hppfs/hppfs_kern.c14
-rw-r--r--fs/inode.c15
-rw-r--r--fs/jffs2/background.c3
-rw-r--r--fs/locks.c45
-rw-r--r--fs/msdos/namei.c15
-rw-r--r--fs/namei.c3
-rw-r--r--fs/proc/base.c13
-rw-r--r--fs/proc/proc_misc.c163
-rw-r--r--fs/select.c8
-rw-r--r--fs/smbfs/file.c6
-rw-r--r--fs/splice.c25
-rw-r--r--fs/sync.c164
-rw-r--r--fs/sysfs/dir.c2
-rw-r--r--fs/sysfs/file.c2
-rw-r--r--fs/sysfs/inode.c3
-rw-r--r--fs/sysv/dir.c6
-rw-r--r--fs/udf/inode.c6
-rw-r--r--fs/vfat/namei.c18
-rw-r--r--include/asm-arm/arch-at91rm9200/hardware.h3
-rw-r--r--include/asm-arm/arch-ixp23xx/uncompress.h11
-rw-r--r--include/asm-arm/arch-pxa/pxa-regs.h2
-rw-r--r--include/asm-arm/arch-pxa/sharpsl.h2
-rw-r--r--include/asm-arm/unistd.h11
-rw-r--r--include/asm-generic/local.h13
-rw-r--r--include/asm-generic/mutex-dec.h30
-rw-r--r--include/asm-generic/mutex-xchg.h33
-rw-r--r--include/asm-i386/apicdef.h1
-rw-r--r--include/asm-i386/floppy.h34
-rw-r--r--include/asm-i386/local.h6
-rw-r--r--include/asm-i386/unistd.h3
-rw-r--r--include/asm-ia64/pal.h34
-rw-r--r--include/asm-powerpc/eeh.h20
-rw-r--r--include/asm-powerpc/hvcall.h185
-rw-r--r--include/asm-powerpc/system.h5
-rw-r--r--include/asm-s390/percpu.h2
-rw-r--r--include/asm-sparc/unistd.h6
-rw-r--r--include/asm-sparc64/unistd.h4
-rw-r--r--include/asm-um/desc.h12
-rw-r--r--include/asm-um/host_ldt-i386.h34
-rw-r--r--include/asm-um/host_ldt-x86_64.h (renamed from include/asm-um/ldt-x86_64.h)39
-rw-r--r--include/asm-um/ldt-i386.h69
-rw-r--r--include/asm-um/ldt.h41
-rw-r--r--include/asm-um/processor-i386.h35
-rw-r--r--include/asm-um/processor-x86_64.h9
-rw-r--r--include/asm-um/ptrace-generic.h16
-rw-r--r--include/asm-um/ptrace-i386.h41
-rw-r--r--include/asm-um/ptrace-x86_64.h35
-rw-r--r--include/asm-um/segment.h6
-rw-r--r--include/asm-um/thread_info.h16
-rw-r--r--include/asm-um/uaccess.h2
-rw-r--r--include/asm-x86_64/local.h10
-rw-r--r--include/linux/backlight.h25
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/fadvise.h6
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/fs.h24
-rw-r--r--include/linux/gameport.h7
-rw-r--r--include/linux/hrtimer.h18
-rw-r--r--include/linux/input.h23
-rw-r--r--include/linux/ipmi_smi.h16
-rw-r--r--include/linux/kbd_kern.h2
-rw-r--r--include/linux/keyboard.h13
-rw-r--r--include/linux/leds.h111
-rw-r--r--include/linux/libps2.h2
-rw-r--r--include/linux/migrate.h5
-rw-r--r--include/linux/mtd/blktrans.h4
-rw-r--r--include/linux/mtd/doc2000.h4
-rw-r--r--include/linux/mtd/inftl.h5
-rw-r--r--include/linux/namei.h1
-rw-r--r--include/linux/netdevice.h55
-rw-r--r--include/linux/netfilter/x_tables.h67
-rw-r--r--include/linux/netfilter/xt_esp.h14
-rw-r--r--include/linux/netfilter/xt_multiport.h30
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h18
-rw-r--r--include/linux/netfilter_ipv4/ipt_esp.h14
-rw-r--r--include/linux/netfilter_ipv4/ipt_multiport.h31
-rw-r--r--include/linux/netfilter_ipv6/ip6t_esp.h12
-rw-r--r--include/linux/netfilter_ipv6/ip6t_multiport.h25
-rw-r--r--include/linux/pagemap.h4
-rw-r--r--include/linux/pid.h96
-rw-r--r--include/linux/pipe_fs_i.h3
-rw-r--r--include/linux/sched.h18
-rw-r--r--include/linux/serio.h9
-rw-r--r--include/linux/skbuff.h29
-rw-r--r--include/linux/syscalls.h2
-rw-r--r--include/linux/timer.h8
-rw-r--r--include/linux/tiocl.h1
-rw-r--r--include/linux/uinput.h4
-rw-r--r--include/linux/videodev2.h65
-rw-r--r--include/media/cx25840.h64
-rw-r--r--include/media/msp3400.h60
-rw-r--r--include/media/saa7115.h37
-rw-r--r--include/media/saa7127.h41
-rw-r--r--include/media/upd64031a.h40
-rw-r--r--include/media/upd64083.h58
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/net/xfrm.h19
-rw-r--r--include/pcmcia/bulkmem.h4
-rw-r--r--include/pcmcia/ciscode.h5
-rw-r--r--include/pcmcia/cistpl.h21
-rw-r--r--include/pcmcia/cs.h34
-rw-r--r--include/pcmcia/ds.h80
-rw-r--r--include/pcmcia/ss.h11
-rw-r--r--ipc/shm.c15
-rw-r--r--ipc/util.c6
-rw-r--r--kernel/acct.c12
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/cpuset.c69
-rw-r--r--kernel/exit.c7
-rw-r--r--kernel/fork.c28
-rw-r--r--kernel/futex.c4
-rw-r--r--kernel/futex_compat.c4
-rw-r--r--kernel/hrtimer.c49
-rw-r--r--kernel/module.c1
-rw-r--r--kernel/pid.c212
-rw-r--r--kernel/power/Kconfig2
-rw-r--r--kernel/power/process.c3
-rw-r--r--kernel/printk.c6
-rw-r--r--kernel/ptrace.c3
-rw-r--r--kernel/sched.c84
-rw-r--r--kernel/signal.c7
-rw-r--r--kernel/sys.c19
-rw-r--r--kernel/time.c8
-rw-r--r--kernel/timer.c95
-rw-r--r--mm/fadvise.c20
-rw-r--r--mm/highmem.c15
-rw-r--r--mm/hugetlb.c6
-rw-r--r--mm/memory.c2
-rw-r--r--mm/mmap.c9
-rw-r--r--mm/page-writeback.c2
-rw-r--r--mm/slab.c18
-rw-r--r--mm/swap_state.c3
-rw-r--r--mm/swapfile.c14
-rw-r--r--mm/vmalloc.c3
-rw-r--r--net/compat.c3
-rw-r--r--net/core/dev.c64
-rw-r--r--net/core/sock.c16
-rw-r--r--net/dccp/feat.c6
-rw-r--r--net/decnet/dn_dev.c2
-rw-r--r--net/ipv4/ah4.c2
-rw-r--r--net/ipv4/esp4.c5
-rw-r--r--net/ipv4/ipcomp.c3
-rw-r--r--net/ipv4/netfilter/Kconfig18
-rw-r--r--net/ipv4/netfilter/Makefile3
-rw-r--r--net/ipv4/netfilter/ip_conntrack_netlink.c2
-rw-r--r--net/ipv4/netfilter/ip_tables.c1138
-rw-r--r--net/ipv4/netfilter/ipt_multiport.c195
-rw-r--r--net/ipv4/xfrm4_input.c15
-rw-r--r--net/ipv4/xfrm4_tunnel.c2
-rw-r--r--net/ipv6/ah6.c2
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/ipcomp6.c2
-rw-r--r--net/ipv6/netfilter/Kconfig16
-rw-r--r--net/ipv6/netfilter/Makefile3
-rw-r--r--net/ipv6/netfilter/ip6t_esp.c115
-rw-r--r--net/ipv6/netfilter/ip6t_multiport.c125
-rw-r--r--net/ipv6/xfrm6_input.c11
-rw-r--r--net/ipv6/xfrm6_tunnel.c2
-rw-r--r--net/netfilter/Kconfig19
-rw-r--r--net/netfilter/Makefile2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c6
-rw-r--r--net/netfilter/x_tables.c113
-rw-r--r--net/netfilter/xt_esp.c (renamed from net/ipv4/netfilter/ipt_esp.c)81
-rw-r--r--net/netfilter/xt_multiport.c314
-rw-r--r--net/netfilter/xt_policy.c2
-rw-r--r--net/socket.c7
-rw-r--r--net/xfrm/xfrm_input.c4
-rw-r--r--net/xfrm/xfrm_policy.c10
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c86
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.h2
-rw-r--r--sound/pcmcia/vx/vxpocket.c94
-rw-r--r--sound/pcmcia/vx/vxpocket.h2
711 files changed, 44008 insertions, 13320 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 7d87dd73cbe40..5a2882d275ba5 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -2,7 +2,7 @@
# This makefile is used to generate the kernel documentation,
# primarily based on in-line comments in various source files.
# See Documentation/kernel-doc-nano-HOWTO.txt for instruction in how
-# to ducument the SRC - and how to read it.
+# to document the SRC - and how to read it.
# To add a new book the only step required is to add the book to the
# list of DOCBOOKS.
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 8c9c6704e85ba..ca02e04a906c2 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -322,7 +322,6 @@ X!Earch/i386/kernel/mca.c
<chapter id="sysfs">
<title>The Filesystem for Exporting Kernel Objects</title>
!Efs/sysfs/file.c
-!Efs/sysfs/dir.c
!Efs/sysfs/symlink.c
!Efs/sysfs/bin.c
</chapter>
diff --git a/Documentation/acpi-hotkey.txt b/Documentation/acpi-hotkey.txt
index 744f1aec65535..38040fa376495 100644
--- a/Documentation/acpi-hotkey.txt
+++ b/Documentation/acpi-hotkey.txt
@@ -30,7 +30,7 @@ specific hotkey(event))
echo "event_num:event_type:event_argument" >
/proc/acpi/hotkey/action.
The result of the execution of this aml method is
-attached to /proc/acpi/hotkey/poll_method, which is dnyamically
+attached to /proc/acpi/hotkey/poll_method, which is dynamically
created. Please use command "cat /proc/acpi/hotkey/polling_method"
to retrieve it.
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 495858b236b61..59d0c74c79c95 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -127,13 +127,6 @@ Who: Christoph Hellwig <hch@lst.de>
---------------------------
-What: EXPORT_SYMBOL(lookup_hash)
-When: January 2006
-Why: Too low-level interface. Use lookup_one_len or lookup_create instead.
-Who: Christoph Hellwig <hch@lst.de>
-
----------------------------
-
What: CONFIG_FORCED_INLINING
When: June 2006
Why: Config option is there to see if gcc is good enough. (in january
@@ -241,3 +234,15 @@ Why: The USB subsystem has changed a lot over time, and it has been
Who: Greg Kroah-Hartman <gregkh@suse.de>
---------------------------
+
+What: find_trylock_page
+When: January 2007
+Why: The interface no longer has any callers left in the kernel. It
+ is an odd interface (compared with other find_*_page functions), in
+ that it does not take a refcount to the page, only the page lock.
+ It should be replaced with find_get_page or find_lock_page if possible.
+ This feature removal can be reevaluated if users of the interface
+ cannot cleanly use something else.
+Who: Nick Piggin <npiggin@suse.de>
+
+---------------------------
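
A minimal sketch of the suggested replacement (an illustration by the editor,
not part of this patch). It assumes the usual pagecache calling convention
from <linux/pagemap.h>, with 'mapping' and 'index' supplied by the caller;
unlike find_trylock_page(), find_lock_page() also takes a reference to the
page, so the caller must drop the reference as well as the lock:

	struct page *page;

	page = find_lock_page(mapping, index);	/* locks the page and takes a ref */
	if (page) {
		/* ... operate on the locked page ... */
		unlock_page(page);
		page_cache_release(page);	/* drop the reference taken above */
	}
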
diff --git a/Documentation/fujitsu/frv/kernel-ABI.txt b/Documentation/fujitsu/frv/kernel-ABI.txt
index 0ed9b0a779bca..8b0a5fc8bfd96 100644
--- a/Documentation/fujitsu/frv/kernel-ABI.txt
+++ b/Documentation/fujitsu/frv/kernel-ABI.txt
@@ -1,17 +1,19 @@
- =================================
- INTERNAL KERNEL ABI FOR FR-V ARCH
- =================================
-
-The internal FRV kernel ABI is not quite the same as the userspace ABI. A number of the registers
-are used for special purposed, and the ABI is not consistent between modules vs core, and MMU vs
-no-MMU.
-
-This partly stems from the fact that FRV CPUs do not have a separate supervisor stack pointer, and
-most of them do not have any scratch registers, thus requiring at least one general purpose
-register to be clobbered in such an event. Also, within the kernel core, it is possible to simply
-jump or call directly between functions using a relative offset. This cannot be extended to modules
-for the displacement is likely to be too far. Thus in modules the address of a function to call
-must be calculated in a register and then used, requiring two extra instructions.
+ =================================
+ INTERNAL KERNEL ABI FOR FR-V ARCH
+ =================================
+
+The internal FRV kernel ABI is not quite the same as the userspace ABI. A
+number of the registers are used for special purposes, and the ABI is not
+consistent between modules vs core, and MMU vs no-MMU.
+
+This partly stems from the fact that FRV CPUs do not have a separate
+supervisor stack pointer, and most of them do not have any scratch
+registers, thus requiring at least one general purpose register to be
+clobbered in such an event. Also, within the kernel core, it is possible to
+simply jump or call directly between functions using a relative offset.
+This cannot be extended to modules for the displacement is likely to be too
+far. Thus in modules the address of a function to call must be calculated
+in a register and then used, requiring two extra instructions.
This document has the following sections:
@@ -39,7 +41,8 @@ When a system call is made, the following registers are effective:
CPU OPERATING MODES
===================
-The FR-V CPU has three basic operating modes. In order of increasing capability:
+The FR-V CPU has three basic operating modes. In order of increasing
+capability:
(1) User mode.
@@ -47,42 +50,46 @@ The FR-V CPU has three basic operating modes. In order of increasing capability:
(2) Kernel mode.
- Normal kernel mode. There are many additional control registers available that may be
- accessed in this mode, in addition to all the stuff available to user mode. This has two
- submodes:
+ Normal kernel mode. There are many additional control registers
+ available that may be accessed in this mode, in addition to all the
+ stuff available to user mode. This has two submodes:
(a) Exceptions enabled (PSR.T == 1).
- Exceptions will invoke the appropriate normal kernel mode handler. On entry to the
- handler, the PSR.T bit will be cleared.
+ Exceptions will invoke the appropriate normal kernel mode
+ handler. On entry to the handler, the PSR.T bit will be cleared.
(b) Exceptions disabled (PSR.T == 0).
- No exceptions or interrupts may happen. Any mandatory exceptions will cause the CPU to
- halt unless the CPU is told to jump into debug mode instead.
+ No exceptions or interrupts may happen. Any mandatory exceptions
+ will cause the CPU to halt unless the CPU is told to jump into
+ debug mode instead.
(3) Debug mode.
- No exceptions may happen in this mode. Memory protection and management exceptions will be
- flagged for later consideration, but the exception handler won't be invoked. Debugging traps
- such as hardware breakpoints and watchpoints will be ignored. This mode is entered only by
- debugging events obtained from the other two modes.
+ No exceptions may happen in this mode. Memory protection and
+ management exceptions will be flagged for later consideration, but
+ the exception handler won't be invoked. Debugging traps such as
+ hardware breakpoints and watchpoints will be ignored. This mode is
+ entered only by debugging events obtained from the other two modes.
- All kernel mode registers may be accessed, plus a few extra debugging specific registers.
+ All kernel mode registers may be accessed, plus a few extra debugging
+ specific registers.
=================================
INTERNAL KERNEL-MODE REGISTER ABI
=================================
-There are a number of permanent register assignments that are set up by entry.S in the exception
-prologue. Note that there is a complete set of exception prologues for each of user->kernel
-transition and kernel->kernel transition. There are also user->debug and kernel->debug mode
-transition prologues.
+There are a number of permanent register assignments that are set up by
+entry.S in the exception prologue. Note that there is a complete set of
+exception prologues for each of user->kernel transition and kernel->kernel
+transition. There are also user->debug and kernel->debug mode transition
+prologues.
REGISTER FLAVOUR USE
- =============== ======= ====================================================
+ =============== ======= ==============================================
GR1 Supervisor stack pointer
GR15 Current thread info pointer
GR16 GP-Rel base register for small data
@@ -92,10 +99,12 @@ transition prologues.
GR31 NOMMU Destroyed by debug mode entry
GR31 MMU Destroyed by TLB miss kernel mode entry
CCR.ICC2 Virtual interrupt disablement tracking
- CCCR.CC3 Cleared by exception prologue (atomic op emulation)
+ CCCR.CC3 Cleared by exception prologue
+ (atomic op emulation)
SCR0 MMU See mmu-layout.txt.
SCR1 MMU See mmu-layout.txt.
- SCR2 MMU Save for EAR0 (destroyed by icache insns in debug mode)
+ SCR2 MMU Save for EAR0 (destroyed by icache insns
+ in debug mode)
SCR3 MMU Save for GR31 during debug exceptions
DAMR/IAMR NOMMU Fixed memory protection layout.
DAMR/IAMR MMU See mmu-layout.txt.
@@ -104,18 +113,21 @@ transition prologues.
Certain registers are also used or modified across function calls:
REGISTER CALL RETURN
- =============== =============================== ===============================
+ =============== =============================== ======================
GR0 Fixed Zero -
GR2 Function call frame pointer
GR3 Special Preserved
GR3-GR7 - Clobbered
- GR8 Function call arg #1 Return value (or clobbered)
- GR9 Function call arg #2 Return value MSW (or clobbered)
+ GR8 Function call arg #1 Return value
+ (or clobbered)
+ GR9 Function call arg #2 Return value MSW
+ (or clobbered)
GR10-GR13 Function call arg #3-#6 Clobbered
GR14 - Clobbered
GR15-GR16 Special Preserved
GR17-GR27 - Preserved
- GR28-GR31 Special Only accessed explicitly
+ GR28-GR31 Special Only accessed
+ explicitly
LR Return address after CALL Clobbered
CCR/CCCR - Mostly Clobbered
@@ -124,46 +136,53 @@ Certain registers are also used or modified across function calls:
INTERNAL DEBUG-MODE REGISTER ABI
================================
-This is the same as the kernel-mode register ABI for functions calls. The difference is that in
-debug-mode there's a different stack and a different exception frame. Almost all the global
-registers from kernel-mode (including the stack pointer) may be changed.
+This is the same as the kernel-mode register ABI for functions calls. The
+difference is that in debug-mode there's a different stack and a different
+exception frame. Almost all the global registers from kernel-mode
+(including the stack pointer) may be changed.
REGISTER FLAVOUR USE
- =============== ======= ====================================================
+ =============== ======= ==============================================
GR1 Debug stack pointer
GR16 GP-Rel base register for small data
- GR31 Current debug exception frame pointer (__debug_frame)
+ GR31 Current debug exception frame pointer
+ (__debug_frame)
SCR3 MMU Saved value of GR31
-Note that debug mode is able to interfere with the kernel's emulated atomic ops, so it must be
-exceedingly careful not to do any that would interact with the main kernel in this regard. Hence
-the debug mode code (gdbstub) is almost completely self-contained. The only external code used is
-the sprintf family of functions.
+Note that debug mode is able to interfere with the kernel's emulated atomic
+ops, so it must be exceedingly careful not to do any that would interact
+with the main kernel in this regard. Hence the debug mode code (gdbstub) is
+almost completely self-contained. The only external code used is the
+sprintf family of functions.
-Futhermore, break.S is so complicated because single-step mode does not switch off on entry to an
-exception. That means unless manually disabled, single-stepping will blithely go on stepping into
-things like interrupts. See gdbstub.txt for more information.
+Furthermore, break.S is so complicated because single-step mode does not
+switch off on entry to an exception. That means unless manually disabled,
+single-stepping will blithely go on stepping into things like interrupts.
+See gdbstub.txt for more information.
==========================
VIRTUAL INTERRUPT HANDLING
==========================
-Because accesses to the PSR is so slow, and to disable interrupts we have to access it twice (once
-to read and once to write), we don't actually disable interrupts at all if we don't have to. What
-we do instead is use the ICC2 condition code flags to note virtual disablement, such that if we
-then do take an interrupt, we note the flag, really disable interrupts, set another flag and resume
-execution at the point the interrupt happened. Setting condition flags as a side effect of an
-arithmetic or logical instruction is really fast. This use of the ICC2 only occurs within the
+Because accesses to the PSR are so slow, and to disable interrupts we have
+to access it twice (once to read and once to write), we don't actually
+disable interrupts at all if we don't have to. What we do instead is use
+the ICC2 condition code flags to note virtual disablement, such that if we
+then do take an interrupt, we note the flag, really disable interrupts, set
+another flag and resume execution at the point the interrupt happened.
+Setting condition flags as a side effect of an arithmetic or logical
+instruction is really fast. This use of the ICC2 only occurs within the
kernel - it does not affect userspace.
The flags we use are:
(*) CCR.ICC2.Z [Zero flag]
- Set to virtually disable interrupts, clear when interrupts are virtually enabled. Can be
- modified by logical instructions without affecting the Carry flag.
+ Set to virtually disable interrupts, clear when interrupts are
+ virtually enabled. Can be modified by logical instructions without
+ affecting the Carry flag.
(*) CCR.ICC2.C [Carry flag]
@@ -176,8 +195,9 @@ What happens is this:
ICC2.Z is 0, ICC2.C is 1.
- (2) An interrupt occurs. The exception prologue examines ICC2.Z and determines that nothing needs
- doing. This is done simply with an unlikely BEQ instruction.
+ (2) An interrupt occurs. The exception prologue examines ICC2.Z and
+ determines that nothing needs doing. This is done simply with an
+ unlikely BEQ instruction.
(3) The interrupts are disabled (local_irq_disable)
@@ -187,48 +207,56 @@ What happens is this:
ICC2.Z would be set to 0.
- A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would be used to trap if
- interrupts were now virtually enabled, but physically disabled - which they're not, so the
- trap isn't taken. The kernel would then be back to state (1).
+ A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would
+ be used to trap if interrupts were now virtually enabled, but
+ physically disabled - which they're not, so the trap isn't taken. The
+ kernel would then be back to state (1).
- (5) An interrupt occurs. The exception prologue examines ICC2.Z and determines that the interrupt
- shouldn't actually have happened. It jumps aside, and there disabled interrupts by setting
- PSR.PIL to 14 and then it clears ICC2.C.
+ (5) An interrupt occurs. The exception prologue examines ICC2.Z and
+ determines that the interrupt shouldn't actually have happened. It
+ jumps aside, and there disables interrupts by setting PSR.PIL to 14
+ and then it clears ICC2.C.
(6) If interrupts were then saved and disabled again (local_irq_save):
- ICC2.Z would be shifted into the save variable and masked off (giving a 1).
+ ICC2.Z would be shifted into the save variable and masked off
+ (giving a 1).
- ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be unaffected (ie: 0).
+ ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be
+ unaffected (ie: 0).
(7) If interrupts were then restored from state (6) (local_irq_restore):
- ICC2.Z would be set to indicate the result of XOR'ing the saved value (ie: 1) with 1, which
- gives a result of 0 - thus leaving ICC2.Z set.
+ ICC2.Z would be set to indicate the result of XOR'ing the saved
+ value (ie: 1) with 1, which gives a result of 0 - thus leaving
+ ICC2.Z set.
ICC2.C would remain unaffected (ie: 0).
- A TIHI #2 instruction would be used to again assay the current state, but this would do
- nothing as Z==1.
+ A TIHI #2 instruction would be used to again assay the current state,
+ but this would do nothing as Z==1.
(8) If interrupts were then enabled (local_irq_enable):
- ICC2.Z would be cleared. ICC2.C would be left unaffected. Both flags would now be 0.
+ ICC2.Z would be cleared. ICC2.C would be left unaffected. Both
+ flags would now be 0.
- A TIHI #2 instruction again issued to assay the current state would then trap as both Z==0
- [interrupts virtually enabled] and C==0 [interrupts really disabled] would then be true.
+ A TIHI #2 instruction again issued to assay the current state would
+ then trap as both Z==0 [interrupts virtually enabled] and C==0
+ [interrupts really disabled] would then be true.
- (9) The trap #2 handler would simply enable hardware interrupts (set PSR.PIL to 0), set ICC2.C to
- 1 and return.
+ (9) The trap #2 handler would simply enable hardware interrupts
+ (set PSR.PIL to 0), set ICC2.C to 1 and return.
(10) Immediately upon returning, the pending interrupt would be taken.
-(11) The interrupt handler would take the path of actually processing the interrupt (ICC2.Z is
- clear, BEQ fails as per step (2)).
+(11) The interrupt handler would take the path of actually processing the
+ interrupt (ICC2.Z is clear, BEQ fails as per step (2)).
-(12) The interrupt handler would then set ICC2.C to 1 since hardware interrupts are definitely
- enabled - or else the kernel wouldn't be here.
+(12) The interrupt handler would then set ICC2.C to 1 since hardware
+ interrupts are definitely enabled - or else the kernel wouldn't be here.
(13) On return from the interrupt handler, things would be back to state (1).
-This trap (#2) is only available in kernel mode. In user mode it will result in SIGILL.
+This trap (#2) is only available in kernel mode. In user mode it will
+result in SIGILL.
diff --git a/Documentation/input/joystick-parport.txt b/Documentation/input/joystick-parport.txt
index 88a011c9f985c..d537c48cc6d02 100644
--- a/Documentation/input/joystick-parport.txt
+++ b/Documentation/input/joystick-parport.txt
@@ -36,12 +36,12 @@ with them.
All NES and SNES use the same synchronous serial protocol, clocked from
the computer's side (and thus timing insensitive). To allow up to 5 NES
-and/or SNES gamepads connected to the parallel port at once, the output
-lines of the parallel port are shared, while one of 5 available input lines
-is assigned to each gamepad.
+and/or SNES gamepads and/or SNES mice connected to the parallel port at once,
+the output lines of the parallel port are shared, while one of 5 available
+input lines is assigned to each gamepad.
This protocol is handled by the gamecon.c driver, so that's the one
-you'll use for NES and SNES gamepads.
+you'll use for NES, SNES gamepads and SNES mice.
The main problem with PC parallel ports is that they don't have +5V power
source on any of their pins. So, if you want a reliable source of power
@@ -106,7 +106,7 @@ A, Turbo B, Select and Start, and is connected through 5 wires, then it is
either a NES or NES clone and will work with this connection. SNES gamepads
also use 5 wires, but have more buttons. They will work as well, of course.
-Pinout for NES gamepads Pinout for SNES gamepads
+Pinout for NES gamepads Pinout for SNES gamepads and mice
+----> Power +-----------------------\
| 7 | o o o o | x x o | 1
@@ -454,6 +454,7 @@ uses the following kernel/module command line:
6 | N64 pad
7 | Sony PSX controller
8 | Sony PSX DDR controller
+ 9 | SNES mouse
The exact type of the PSX controller type is autoprobed when used so
 hot swapping should work (but is not recommended).
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f8cb55c30b0fb..b3a6187e53051 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1,4 +1,4 @@
-February 2003 Kernel Parameters v2.5.59
+ Kernel Parameters
~~~~~~~~~~~~~~~~~
The following is a consolidated list of the kernel parameters as implemented
@@ -17,9 +17,17 @@ are specified on the kernel command line with the module name plus
usbcore.blinkenlights=1
-The text in square brackets at the beginning of the description states the
-restrictions on the kernel for the said kernel parameter to be valid. The
-restrictions referred to are that the relevant option is valid if:
+This document may not be entirely up to date and comprehensive. The command
+"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
+module. Loadable modules, after being loaded into the running kernel, also
+reveal their parameters in /sys/module/${modulename}/parameters/. Some of these
+parameters may be changed at runtime by the command
+"echo -n ${value} > /sys/module/${modulename}/parameters/${parm}".
+
+The parameters listed below are only valid if certain kernel build options were
+enabled and if respective hardware is present. The text in square brackets at
+the beginning of each description states the restrictions within which a
+parameter is applicable:
ACPI ACPI support is enabled.
ALSA ALSA sound support is enabled.
@@ -1046,10 +1054,10 @@ running once the system is up.
noltlbs [PPC] Do not use large page/tlb entries for kernel
lowmem mapping on PPC40x.
- nomce [IA-32] Machine Check Exception
-
nomca [IA-64] Disable machine check abort handling
+ nomce [IA-32] Machine Check Exception
+
noresidual [PPC] Don't use residual data on PReP machines.
noresume [SWSUSP] Disables resume and restores original swap
@@ -1682,20 +1690,6 @@ running once the system is up.
______________________________________________________________________
-Changelog:
-
-2000-06-?? Mr. Unknown
- The last known update (for 2.4.0) - the changelog was not kept before.
-
-2002-11-24 Petr Baudis <pasky@ucw.cz>
- Randy Dunlap <randy.dunlap@verizon.net>
- Update for 2.5.49, description for most of the options introduced,
- references to other documentation (C files, READMEs, ..), added S390,
- PPC, SPARC, MTD, ALSA and OSS category. Minor corrections and
- reformatting.
-
-2005-10-19 Randy Dunlap <rdunlap@xenotime.net>
- Lots of typos, whitespace, some reformatting.
TODO:
diff --git a/Documentation/leds-class.txt b/Documentation/leds-class.txt
new file mode 100644
index 0000000000000..8c35c0426110f
--- /dev/null
+++ b/Documentation/leds-class.txt
@@ -0,0 +1,71 @@
+LED handling under Linux
+========================
+
+If you're reading this and thinking about keyboard leds, these are
+handled by the input subsystem and the led class is *not* needed.
+
+In its simplest form, the LED class just allows control of LEDs from
+userspace. LEDs appear in /sys/class/leds/. The brightness file will
+set the brightness of the LED (taking a value 0-255). Most LEDs don't
+have hardware brightness support so will just be turned on for non-zero
+brightness settings.
+
+The class also introduces the optional concept of an LED trigger. A trigger
+is a kernel based source of led events. Triggers can either be simple or
+complex. A simple trigger isn't configurable and is designed to slot into
+existing subsystems with minimal additional code. Examples are the ide-disk,
+nand-disk and sharpsl-charge triggers. With led triggers disabled, the code
+optimises away.
+
+Complex triggers whilst available to all LEDs have LED specific
+parameters and work on a per LED basis. The timer trigger is an example.
+
+You can change triggers in a similar manner to the way an IO scheduler
+is chosen (via /sys/class/leds/<device>/trigger). Trigger specific
+parameters can appear in /sys/class/leds/<device> once a given trigger is
+selected.
+
+
+Design Philosophy
+=================
+
+The underlying design philosophy is simplicity. LEDs are simple devices
+and the aim is to keep a small amount of code giving as much functionality
+as possible. Please keep this in mind when suggesting enhancements.
+
+
+LED Device Naming
+=================
+
+Is currently of the form:
+
+"devicename:colour"
+
+There have been calls for LED properties such as colour to be exported as
+individual led class attributes. As a solution which doesn't incur as much
+overhead, I suggest these become part of the device name. The naming scheme
+above leaves scope for further attributes should they be needed.
+
+
+Known Issues
+============
+
+The LED Trigger core cannot be a module as the simple trigger functions
+would cause nightmare dependency issues. I see this as a minor issue
+compared to the benefits the simple trigger functionality brings. The
+rest of the LED subsystem can be modular.
+
+Some leds can be programmed to flash in hardware. As this isn't a generic
+LED device property, this should be exported as a device specific sysfs
+attribute rather than part of the class if this functionality is required.
+
+
+Future Development
+==================
+
+At the moment, a trigger can't be created specifically for a single LED.
+There are a number of cases where a trigger might only be mappable to a
+particular LED (ACPI?). The addition of triggers provided by the LED driver
+should cover this option and be possible to add without breaking the
+current interface.
+
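
As a rough illustration of the sysfs interface described above (editor's
sketch, not part of this patch), an LED can be driven entirely from
userspace; the device name "myled:green" and the choice of the timer
trigger are assumptions for the example:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Write a value to one attribute of the hypothetical "myled:green" LED. */
	static int led_write(const char *attr, const char *value)
	{
		char path[128];
		int fd;

		snprintf(path, sizeof(path), "/sys/class/leds/myled:green/%s", attr);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		if (write(fd, value, strlen(value)) < 0)
			perror(path);
		close(fd);
		return 0;
	}

	int main(void)
	{
		led_write("brightness", "255");	/* full on; "0" turns the LED off */
		led_write("trigger", "timer");	/* attach the timer trigger, if built in */
		return 0;
	}
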
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
new file mode 100644
index 0000000000000..f8550310a6d5d
--- /dev/null
+++ b/Documentation/memory-barriers.txt
@@ -0,0 +1,1913 @@
+ ============================
+ LINUX KERNEL MEMORY BARRIERS
+ ============================
+
+By: David Howells <dhowells@redhat.com>
+
+Contents:
+
+ (*) Abstract memory access model.
+
+ - Device operations.
+ - Guarantees.
+
+ (*) What are memory barriers?
+
+ - Varieties of memory barrier.
+ - What may not be assumed about memory barriers?
+ - Data dependency barriers.
+ - Control dependencies.
+ - SMP barrier pairing.
+ - Examples of memory barrier sequences.
+
+ (*) Explicit kernel barriers.
+
+ - Compiler barrier.
+ - The CPU memory barriers.
+ - MMIO write barrier.
+
+ (*) Implicit kernel memory barriers.
+
+ - Locking functions.
+ - Interrupt disabling functions.
+ - Miscellaneous functions.
+
+ (*) Inter-CPU locking barrier effects.
+
+ - Locks vs memory accesses.
+ - Locks vs I/O accesses.
+
+ (*) Where are memory barriers needed?
+
+ - Interprocessor interaction.
+ - Atomic operations.
+ - Accessing devices.
+ - Interrupts.
+
+ (*) Kernel I/O barrier effects.
+
+ (*) Assumed minimum execution ordering model.
+
+ (*) The effects of the cpu cache.
+
+ - Cache coherency.
+ - Cache coherency vs DMA.
+ - Cache coherency vs MMIO.
+
+ (*) The things CPUs get up to.
+
+ - And then there's the Alpha.
+
+ (*) References.
+
+
+============================
+ABSTRACT MEMORY ACCESS MODEL
+============================
+
+Consider the following abstract model of the system:
+
+ : :
+ : :
+ : :
+ +-------+ : +--------+ : +-------+
+ | | : | | : | |
+ | | : | | : | |
+ | CPU 1 |<----->| Memory |<----->| CPU 2 |
+ | | : | | : | |
+ | | : | | : | |
+ +-------+ : +--------+ : +-------+
+ ^ : ^ : ^
+ | : | : |
+ | : | : |
+ | : v : |
+ | : +--------+ : |
+ | : | | : |
+ | : | | : |
+ +---------->| Device |<----------+
+ : | | :
+ : | | :
+ : +--------+ :
+ : :
+
+Each CPU executes a program that generates memory access operations. In the
+abstract CPU, memory operation ordering is very relaxed, and a CPU may actually
+perform the memory operations in any order it likes, provided program causality
+appears to be maintained. Similarly, the compiler may also arrange the
+instructions it emits in any order it likes, provided it doesn't affect the
+apparent operation of the program.
+
+So in the above diagram, the effects of the memory operations performed by a
+CPU are perceived by the rest of the system as the operations cross the
+interface between the CPU and rest of the system (the dotted lines).
+
+
+For example, consider the following sequence of events:
+
+ CPU 1 CPU 2
+ =============== ===============
+ { A == 1; B == 2 }
+ A = 3; x = A;
+ B = 4; y = B;
+
+The set of accesses as seen by the memory system in the middle can be arranged
+in 24 different combinations:
+
+ STORE A=3, STORE B=4, x=LOAD A->3, y=LOAD B->4
+ STORE A=3, STORE B=4, y=LOAD B->4, x=LOAD A->3
+ STORE A=3, x=LOAD A->3, STORE B=4, y=LOAD B->4
+ STORE A=3, x=LOAD A->3, y=LOAD B->2, STORE B=4
+ STORE A=3, y=LOAD B->2, STORE B=4, x=LOAD A->3
+ STORE A=3, y=LOAD B->2, x=LOAD A->3, STORE B=4
+ STORE B=4, STORE A=3, x=LOAD A->3, y=LOAD B->4
+ STORE B=4, ...
+ ...
+
+and can thus result in four different combinations of values:
+
+ x == 1, y == 2
+ x == 1, y == 4
+ x == 3, y == 2
+ x == 3, y == 4
+
+
+Furthermore, the stores committed by a CPU to the memory system may not be
+perceived by the loads made by another CPU in the same order as the stores were
+committed.
+
+
+As a further example, consider this sequence of events:
+
+ CPU 1 CPU 2
+ =============== ===============
+	{ A == 1, B == 2, C == 3, P == &A, Q == &C }
+ B = 4; Q = P;
+	P = &B;		D = *Q;
+
+There is an obvious data dependency here, as the value loaded into D depends on
+the address retrieved from P by CPU 2. At the end of the sequence, any of the
+following results are possible:
+
+ (Q == &A) and (D == 1)
+ (Q == &B) and (D == 2)
+ (Q == &B) and (D == 4)
+
+Note that CPU 2 will never try and load C into D because the CPU will load P
+into Q before issuing the load of *Q.
+
+
+DEVICE OPERATIONS
+-----------------
+
+Some devices present their control interfaces as collections of memory
+locations, but the order in which the control registers are accessed is very
+important. For instance, imagine an ethernet card with a set of internal
+registers that are accessed through an address port register (A) and a data
+port register (D). To read internal register 5, the following code might then
+be used:
+
+ *A = 5;
+ x = *D;
+
+but this might show up as either of the following two sequences:
+
+ STORE *A = 5, x = LOAD *D
+ x = LOAD *D, STORE *A = 5
+
+the second of which will almost certainly result in a malfunction, since it
+sets the address _after_ attempting to read the register.
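+
+One way to obtain the required ordering, using the kernel barrier primitives
+documented later in this file, might be the following sketch:
+
+	*A = 5;
+	mb();		/* or use an ordered I/O accessor such as writel()/readl() */
+	x = *D;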
+
+
+GUARANTEES
+----------
+
+There are some minimal guarantees that may be expected of a CPU:
+
+ (*) On any given CPU, dependent memory accesses will be issued in order, with
+ respect to itself. This means that for:
+
+ Q = P; D = *Q;
+
+ the CPU will issue the following memory operations:
+
+ Q = LOAD P, D = LOAD *Q
+
+ and always in that order.
+
+ (*) Overlapping loads and stores within a particular CPU will appear to be
+ ordered within that CPU. This means that for:
+
+ a = *X; *X = b;
+
+ the CPU will only issue the following sequence of memory operations:
+
+ a = LOAD *X, STORE *X = b
+
+ And for:
+
+ *X = c; d = *X;
+
+ the CPU will only issue:
+
+ STORE *X = c, d = LOAD *X
+
+     (Loads and stores overlap if they are targeted at overlapping pieces of
+ memory).
+
+And there are a number of things that _must_ or _must_not_ be assumed:
+
+ (*) It _must_not_ be assumed that independent loads and stores will be issued
+ in the order given. This means that for:
+
+ X = *A; Y = *B; *D = Z;
+
+ we may get any of the following sequences:
+
+ X = LOAD *A, Y = LOAD *B, STORE *D = Z
+ X = LOAD *A, STORE *D = Z, Y = LOAD *B
+ Y = LOAD *B, X = LOAD *A, STORE *D = Z
+ Y = LOAD *B, STORE *D = Z, X = LOAD *A
+ STORE *D = Z, X = LOAD *A, Y = LOAD *B
+ STORE *D = Z, Y = LOAD *B, X = LOAD *A
+
+ (*) It _must_ be assumed that overlapping memory accesses may be merged or
+ discarded. This means that for:
+
+ X = *A; Y = *(A + 4);
+
+ we may get any one of the following sequences:
+
+ X = LOAD *A; Y = LOAD *(A + 4);
+ Y = LOAD *(A + 4); X = LOAD *A;
+ {X, Y} = LOAD {*A, *(A + 4) };
+
+ And for:
+
+ *A = X; Y = *A;
+
+ we may get either of:
+
+ STORE *A = X; Y = LOAD *A;
+	STORE *A = X; Y = X;
+
+
+=========================
+WHAT ARE MEMORY BARRIERS?
+=========================
+
+As can be seen above, independent memory operations are effectively performed
+in random order, but this can be a problem for CPU-CPU interaction and for I/O.
+What is required is some way of intervening to instruct the compiler and the
+CPU to restrict the order.
+
+Memory barriers are such interventions. They impose a perceived partial
+ordering over the memory operations on either side of the barrier: they
+require that the sequence of memory events generated by a CPU appears to the
+other parts of the system in an order consistent with the barrier.
+
+
+VARIETIES OF MEMORY BARRIER
+---------------------------
+
+Memory barriers come in four basic varieties:
+
+ (1) Write (or store) memory barriers.
+
+ A write memory barrier gives a guarantee that all the STORE operations
+ specified before the barrier will appear to happen before all the STORE
+ operations specified after the barrier with respect to the other
+ components of the system.
+
+ A write barrier is a partial ordering on stores only; it is not required
+ to have any effect on loads.
+
+     A CPU can be viewed as committing a sequence of store operations to the
+ memory system as time progresses. All stores before a write barrier will
+ occur in the sequence _before_ all the stores after the write barrier.
+
+ [!] Note that write barriers should normally be paired with read or data
+ dependency barriers; see the "SMP barrier pairing" subsection.
+
+
+ (2) Data dependency barriers.
+
+ A data dependency barrier is a weaker form of read barrier. In the case
+ where two loads are performed such that the second depends on the result
+ of the first (eg: the first load retrieves the address to which the second
+ load will be directed), a data dependency barrier would be required to
+ make sure that the target of the second load is updated before the address
+ obtained by the first load is accessed.
+
+ A data dependency barrier is a partial ordering on interdependent loads
+ only; it is not required to have any effect on stores, independent loads
+ or overlapping loads.
+
+ As mentioned in (1), the other CPUs in the system can be viewed as
+ committing sequences of stores to the memory system that the CPU being
+ considered can then perceive. A data dependency barrier issued by the CPU
+ under consideration guarantees that for any load preceding it, if that
+ load touches one of a sequence of stores from another CPU, then by the
+ time the barrier completes, the effects of all the stores prior to that
+ touched by the load will be perceptible to any loads issued after the data
+ dependency barrier.
+
+ See the "Examples of memory barrier sequences" subsection for diagrams
+ showing the ordering constraints.
+
+ [!] Note that the first load really has to have a _data_ dependency and
+ not a control dependency. If the address for the second load is dependent
+ on the first load, but the dependency is through a conditional rather than
+ actually loading the address itself, then it's a _control_ dependency and
+ a full read barrier or better is required. See the "Control dependencies"
+ subsection for more information.
+
+ [!] Note that data dependency barriers should normally be paired with
+ write barriers; see the "SMP barrier pairing" subsection.
+
+
+ (3) Read (or load) memory barriers.
+
+ A read barrier is a data dependency barrier plus a guarantee that all the
+ LOAD operations specified before the barrier will appear to happen before
+ all the LOAD operations specified after the barrier with respect to the
+ other components of the system.
+
+ A read barrier is a partial ordering on loads only; it is not required to
+ have any effect on stores.
+
+ Read memory barriers imply data dependency barriers, and so can substitute
+ for them.
+
+ [!] Note that read barriers should normally be paired with write barriers;
+ see the "SMP barrier pairing" subsection.
+
+
+ (4) General memory barriers.
+
+ A general memory barrier is a combination of both a read memory barrier
+ and a write memory barrier. It is a partial ordering over both loads and
+ stores.
+
+ General memory barriers imply both read and write memory barriers, and so
+ can substitute for either.
+
+
+And a couple of implicit varieties:
+
+ (5) LOCK operations.
+
+ This acts as a one-way permeable barrier. It guarantees that all memory
+ operations after the LOCK operation will appear to happen after the LOCK
+ operation with respect to the other components of the system.
+
+ Memory operations that occur before a LOCK operation may appear to happen
+ after it completes.
+
+ A LOCK operation should almost always be paired with an UNLOCK operation.
+
+
+ (6) UNLOCK operations.
+
+ This also acts as a one-way permeable barrier. It guarantees that all
+ memory operations before the UNLOCK operation will appear to happen before
+ the UNLOCK operation with respect to the other components of the system.
+
+ Memory operations that occur after an UNLOCK operation may appear to
+ happen before it completes.
+
+ LOCK and UNLOCK operations are guaranteed to appear with respect to each
+ other strictly in the order specified.
+
+ The use of LOCK and UNLOCK operations generally precludes the need for
+ other sorts of memory barrier (but note the exceptions mentioned in the
+ subsection "MMIO write barrier").
+
+
+Memory barriers are only required where there's a possibility of interaction
+between two CPUs or between a CPU and a device. If it can be guaranteed that
+there won't be any such interaction in any particular piece of code, then
+memory barriers are unnecessary in that piece of code.
+
+
+Note that these are the _minimum_ guarantees. Different architectures may give
+more substantial guarantees, but they may _not_ be relied upon outside of arch
+specific code.
+
+
+WHAT MAY NOT BE ASSUMED ABOUT MEMORY BARRIERS?
+----------------------------------------------
+
+There are certain things that the Linux kernel memory barriers do not guarantee:
+
+ (*) There is no guarantee that any of the memory accesses specified before a
+ memory barrier will be _complete_ by the completion of a memory barrier
+ instruction; the barrier can be considered to draw a line in that CPU's
+ access queue that accesses of the appropriate type may not cross.
+
+ (*) There is no guarantee that issuing a memory barrier on one CPU will have
+ any direct effect on another CPU or any other hardware in the system. The
+ indirect effect will be the order in which the second CPU sees the effects
+ of the first CPU's accesses occur, but see the next point:
+
+ (*) There is no guarantee that a CPU will see the correct order of effects
+ from a second CPU's accesses, even _if_ the second CPU uses a memory
+ barrier, unless the first CPU _also_ uses a matching memory barrier (see
+ the subsection on "SMP Barrier Pairing").
+
+ (*) There is no guarantee that some intervening piece of off-the-CPU
+ hardware[*] will not reorder the memory accesses. CPU cache coherency
+ mechanisms should propagate the indirect effects of a memory barrier
+ between CPUs, but might not do so in order.
+
+ [*] For information on bus mastering DMA and coherency please read:
+
+ Documentation/pci.txt
+ Documentation/DMA-mapping.txt
+ Documentation/DMA-API.txt
+
+
+DATA DEPENDENCY BARRIERS
+------------------------
+
+The usage requirements of data dependency barriers are a little subtle, and
+it's not always obvious that they're needed. To illustrate, consider the
+following sequence of events:
+
+ CPU 1 CPU 2
+ =============== ===============
+	{ A == 1, B == 2, C == 3, P == &A, Q == &C }
+ B = 4;
+ <write barrier>
+	P = &B;
+ Q = P;
+ D = *Q;
+
+There's a clear data dependency here, and it would seem that by the end of the
+sequence, Q must be either &A or &B, and that:
+
+ (Q == &A) implies (D == 1)
+ (Q == &B) implies (D == 4)
+
+But! CPU 2's perception of P may be updated _before_ its perception of B, thus
+leading to the following situation:
+
+ (Q == &B) and (D == 2) ????
+
+Whilst this may seem like a failure of coherency or causality maintenance, it
+isn't, and this behaviour can be observed on certain real CPUs (such as the DEC
+Alpha).
+
+To deal with this, a data dependency barrier must be inserted between the
+address load and the data load:
+
+ CPU 1 CPU 2
+ =============== ===============
+	{ A == 1, B == 2, C == 3, P == &A, Q == &C }
+ B = 4;
+ <write barrier>
+	P = &B;
+ Q = P;
+ <data dependency barrier>
+ D = *Q;
+
+This enforces the occurrence of one of the two implications, and prevents the
+third possibility from arising.
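+
+Written with the Linux primitives introduced later in this document, the same
+pattern might look like the following sketch (the variables are those of the
+example above):
+
+	/* CPU 1 */
+	B = 4;
+	smp_wmb();			/* pairs with the reader's barrier */
+	P = &B;
+
+	/* CPU 2 */
+	Q = P;
+	smp_read_barrier_depends();	/* ensures *Q sees the new value of B */
+	D = *Q;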
+
+[!] Note that this extremely counterintuitive situation arises most easily on
+machines with split caches, so that, for example, one cache bank processes
+even-numbered cache lines and the other bank processes odd-numbered cache
+lines. The pointer P might be stored in an odd-numbered cache line, and the
+variable B might be stored in an even-numbered cache line. Then, if the
+even-numbered bank of the reading CPU's cache is extremely busy while the
+odd-numbered bank is idle, one can see the new value of the pointer P (&B),
+but the old value of the variable B (1).
+
+
+Another example of where data dependency barriers might be required is where a
+number is read from memory and then used to calculate the index for an array
+access:
+
+ CPU 1 CPU 2
+ =============== ===============
+	{ M[0] == 1, M[1] == 2, M[3] == 3, P == 0, Q == 3 }
+ M[1] = 4;
+ <write barrier>
+	P = 1;
+ Q = P;
+ <data dependency barrier>
+ D = M[Q];
+
+
+The data dependency barrier is very important to the RCU system, for example.
+See rcu_dereference() in include/linux/rcupdate.h. This permits the current
+target of an RCU'd pointer to be replaced with a new modified target, without
+the replacement target appearing to be incompletely initialised.
+
+See also the subsection on "Cache Coherency" for a more thorough example.
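+
+As a purely illustrative sketch - gp and its data field are hypothetical - an
+RCU reader picks up this barrier implicitly through rcu_dereference():
+
+	rcu_read_lock();
+	p = rcu_dereference(gp);	/* implies a data dependency barrier */
+	if (p)
+		d = p->data;
+	rcu_read_unlock();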
+
+
+CONTROL DEPENDENCIES
+--------------------
+
+A control dependency requires a full read memory barrier, not simply a data
+dependency barrier to make it work correctly. Consider the following bit of
+code:
+
+ q = &a;
+ if (p)
+ q = &b;
+ <data dependency barrier>
+ x = *q;
+
+This will not have the desired effect because there is no actual data
+dependency, but rather a control dependency that the CPU may short-circuit by
+attempting to predict the outcome in advance. In such a case what's actually
+required is:
+
+ q = &a;
+ if (p)
+ q = &b;
+ <read barrier>
+ x = *q;
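+
+In terms of the kernel primitives described below, the read barrier here
+would typically be smp_rmb(), for example:
+
+	q = &a;
+	if (p)
+		q = &b;
+	smp_rmb();
+	x = *q;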
+
+
+SMP BARRIER PAIRING
+-------------------
+
+When dealing with CPU-CPU interactions, certain types of memory barrier should
+always be paired. A lack of appropriate pairing is almost certainly an error.
+
+A write barrier should always be paired with a data dependency barrier or read
+barrier, though a general barrier would also be viable. Similarly a read
+barrier or a data dependency barrier should always be paired with at least a
+write barrier, though, again, a general barrier is viable:
+
+ CPU 1 CPU 2
+ =============== ===============
+ a = 1;
+ <write barrier>
+ b = 2; x = a;
+ <read barrier>
+ y = b;
+
+Or:
+
+ CPU 1 CPU 2
+ =============== ===============================
+ a = 1;
+ <write barrier>
+ b = &a; x = b;
+ <data dependency barrier>
+ y = *x;
+
+Basically, the read barrier always has to be there, even though it can be of
+the "weaker" type.
+
+
+EXAMPLES OF MEMORY BARRIER SEQUENCES
+------------------------------------
+
+Firstly, write barriers act as partial orderings on store operations.
+Consider the following sequence of events:
+
+ CPU 1
+ =======================
+ STORE A = 1
+ STORE B = 2
+ STORE C = 3
+ <write barrier>
+ STORE D = 4
+ STORE E = 5
+
+This sequence of events is committed to the memory coherence system in an order
+that the rest of the system might perceive as the unordered set of { STORE A,
+STORE B, STORE C } all occurring before the unordered set of { STORE D, STORE E
+}:
+
+ +-------+ : :
+ | | +------+
+ | |------>| C=3 | } /\
+ | | : +------+ }----- \ -----> Events perceptible
+ | | : | A=1 | } \/ to rest of system
+ | | : +------+ }
+ | CPU 1 | : | B=2 | }
+ | | +------+ }
+ | | wwwwwwwwwwwwwwww } <--- At this point the write barrier
+ | | +------+ } requires all stores prior to the
+ | | : | E=5 | } barrier to be committed before
+ | | : +------+ } further stores may be take place.
+ | |------>| D=4 | }
+ | | +------+
+ +-------+ : :
+ |
+ | Sequence in which stores committed to memory system
+ | by CPU 1
+ V
+
+
+Secondly, data dependency barriers act as partial orderings on data-dependent
+loads. Consider the following sequence of events:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ STORE A = 1
+ STORE B = 2
+ <write barrier>
+ STORE C = &B LOAD X
+ STORE D = 4 LOAD C (gets &B)
+ LOAD *C (reads B)
+
+Without intervention, CPU 2 may perceive the events on CPU 1 in some
+effectively random order, despite the write barrier issued by CPU 1:
+
+ +-------+ : : : :
+ | | +------+ +-------+ | Sequence of update
+ | |------>| B=2 |----- --->| Y->8 | | of perception on
+ | | : +------+ \ +-------+ | CPU 2
+ | CPU 1 | : | A=1 | \ --->| C->&Y | V
+ | | +------+ | +-------+
+ | | wwwwwwwwwwwwwwww | : :
+ | | +------+ | : :
+ | | : | C=&B |--- | : : +-------+
+ | | : +------+ \ | +-------+ | |
+ | |------>| D=4 | ----------->| C->&B |------>| |
+ | | +------+ | +-------+ | |
+ +-------+ : : | : : | |
+ | : : | |
+ | : : | CPU 2 |
+ | +-------+ | |
+ Apparently incorrect ---> | | B->7 |------>| |
+ perception of B (!) | +-------+ | |
+ | : : | |
+ | +-------+ | |
+ The load of X holds ---> \ | X->9 |------>| |
+ up the maintenance \ +-------+ | |
+ of coherence of B ----->| B->2 | +-------+
+ +-------+
+ : :
+
+
+In the above example, CPU 2 perceives that B is 7, despite the load of *C
+(which would be B) coming after the LOAD of C.
+
+If, however, a data dependency barrier were to be placed between the load of C
+and the load of *C (ie: B) on CPU 2, then the following will occur:
+
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| B=2 |----- --->| Y->8 |
+ | | : +------+ \ +-------+
+ | CPU 1 | : | A=1 | \ --->| C->&Y |
+ | | +------+ | +-------+
+ | | wwwwwwwwwwwwwwww | : :
+ | | +------+ | : :
+ | | : | C=&B |--- | : : +-------+
+ | | : +------+ \ | +-------+ | |
+ | |------>| D=4 | ----------->| C->&B |------>| |
+ | | +------+ | +-------+ | |
+ +-------+ : : | : : | |
+ | : : | |
+ | : : | CPU 2 |
+ | +-------+ | |
+ \ | X->9 |------>| |
+ \ +-------+ | |
+ ----->| B->2 | | |
+ +-------+ | |
+ Makes sure all effects ---> ddddddddddddddddd | |
+ prior to the store of C +-------+ | |
+ are perceptible to | B->2 |------>| |
+ successive loads +-------+ | |
+ : : +-------+
+
+
+And thirdly, a read barrier acts as a partial order on loads. Consider the
+following sequence of events:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ STORE A=1
+ STORE B=2
+ STORE C=3
+ <write barrier>
+ STORE D=4
+ STORE E=5
+ LOAD A
+ LOAD B
+ LOAD C
+ LOAD D
+ LOAD E
+
+Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
+some effectively random order, despite the write barrier issued by CPU 1:
+
+ +-------+ : :
+ | | +------+
+ | |------>| C=3 | }
+ | | : +------+ }
+ | | : | A=1 | }
+ | | : +------+ }
+ | CPU 1 | : | B=2 | }---
+ | | +------+ } \
+ | | wwwwwwwwwwwww} \
+ | | +------+ } \ : : +-------+
+ | | : | E=5 | } \ +-------+ | |
+ | | : +------+ } \ { | C->3 |------>| |
+ | |------>| D=4 | } \ { +-------+ : | |
+ | | +------+ \ { | E->5 | : | |
+ +-------+ : : \ { +-------+ : | |
+ Transfer -->{ | A->1 | : | CPU 2 |
+ from CPU 1 { +-------+ : | |
+ to CPU 2 { | D->4 | : | |
+ { +-------+ : | |
+ { | B->2 |------>| |
+ +-------+ | |
+ : : +-------+
+
+
+If, however, a read barrier were to be placed between the load of C and the
+load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
+perceived correctly by CPU 2.
+
+ +-------+ : :
+ | | +------+
+ | |------>| C=3 | }
+ | | : +------+ }
+ | | : | A=1 | }---
+ | | : +------+ } \
+ | CPU 1 | : | B=2 | } \
+ | | +------+ \
+ | | wwwwwwwwwwwwwwww \
+ | | +------+ \ : : +-------+
+ | | : | E=5 | } \ +-------+ | |
+ | | : +------+ }--- \ { | C->3 |------>| |
+ | |------>| D=4 | } \ \ { +-------+ : | |
+ | | +------+ \ -->{ | B->2 | : | |
+ +-------+ : : \ { +-------+ : | |
+ \ { | A->1 | : | CPU 2 |
+ \ +-------+ | |
+ At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
+ barrier causes all effects \ +-------+ | |
+ prior to the storage of C \ { | E->5 | : | |
+ to be perceptible to CPU 2 -->{ +-------+ : | |
+ { | D->4 |------>| |
+ +-------+ | |
+ : : +-------+
+
+
+========================
+EXPLICIT KERNEL BARRIERS
+========================
+
+The Linux kernel has a variety of different barriers that act at different
+levels:
+
+ (*) Compiler barrier.
+
+ (*) CPU memory barriers.
+
+ (*) MMIO write barrier.
+
+
+COMPILER BARRIER
+----------------
+
+The Linux kernel has an explicit compiler barrier function that prevents the
+compiler from moving the memory accesses either side of it to the other side:
+
+ barrier();
+
+This is a general barrier - lesser varieties of compiler barrier do not exist.
+
+The compiler barrier has no direct effect on the CPU, which may then reorder
+things however it wishes.
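+
+One common, if informal, use is to stop the compiler caching a value in a
+register across the iterations of a polling loop, for instance:
+
+	while (!flag)
+		barrier();	/* force flag to be re-read from memory each time */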
+
+
+CPU MEMORY BARRIERS
+-------------------
+
+The Linux kernel has eight basic CPU memory barriers:
+
+ TYPE MANDATORY SMP CONDITIONAL
+ =============== ======================= ===========================
+ GENERAL mb() smp_mb()
+ WRITE wmb() smp_wmb()
+ READ rmb() smp_rmb()
+ DATA DEPENDENCY read_barrier_depends() smp_read_barrier_depends()
+
+
+All CPU memory barriers unconditionally imply compiler barriers.
+
+SMP memory barriers are reduced to compiler barriers on uniprocessor compiled
+systems because it is assumed that a CPU will appear to be self-consistent,
+and will order overlapping accesses correctly with respect to itself.
+
+[!] Note that SMP memory barriers _must_ be used to control the ordering of
+references to shared memory on SMP systems, though the use of locking instead
+is sufficient.
+
+Mandatory barriers should not be used to control SMP effects, since mandatory
+barriers unnecessarily impose overhead on UP systems. They may, however, be
+used to control MMIO effects on accesses through relaxed memory I/O windows.
+These are required even on non-SMP systems as they affect the order in which
+memory operations appear to a device by prohibiting both the compiler and the
+CPU from reordering them.
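+
+For instance, a driver pushing two writes through a relaxed (say,
+write-combining) I/O mapping might do something like the following sketch,
+where regs, CMD and GO are purely hypothetical:
+
+	regs[CMD] = cmd;
+	wmb();		/* mandatory barrier: needed even on a UP kernel */
+	regs[GO] = 1;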
+
+
+There are some more advanced barrier functions:
+
+ (*) set_mb(var, value)
+ (*) set_wmb(var, value)
+
+ These assign the value to the variable and then insert at least a write
+ barrier after it, depending on the function. They aren't guaranteed to
+ insert anything more than a compiler barrier in a UP compilation.
+
+
+ (*) smp_mb__before_atomic_dec();
+ (*) smp_mb__after_atomic_dec();
+ (*) smp_mb__before_atomic_inc();
+ (*) smp_mb__after_atomic_inc();
+
+ These are for use with atomic add, subtract, increment and decrement
+ functions, especially when used for reference counting. These functions
+ do not imply memory barriers.
+
+ As an example, consider a piece of code that marks an object as being dead
+ and then decrements the object's reference count:
+
+ obj->dead = 1;
+ smp_mb__before_atomic_dec();
+ atomic_dec(&obj->ref_count);
+
+ This makes sure that the death mark on the object is perceived to be set
+ *before* the reference counter is decremented.
+
+ See Documentation/atomic_ops.txt for more information. See the "Atomic
+ operations" subsection for information on where to use these.
+
+
+ (*) smp_mb__before_clear_bit(void);
+ (*) smp_mb__after_clear_bit(void);
+
+ These are for use similar to the atomic inc/dec barriers. These are
+ typically used for bitwise unlocking operations, so care must be taken as
+ there are no implicit memory barriers here either.
+
+ Consider implementing an unlock operation of some nature by clearing a
+ locking bit. The clear_bit() would then need to be barriered like this:
+
+ smp_mb__before_clear_bit();
+ clear_bit( ... );
+
+ This prevents memory operations before the clear leaking to after it. See
+ the subsection on "Locking Functions" with reference to UNLOCK operation
+ implications.
+
+ See Documentation/atomic_ops.txt for more information. See the "Atomic
+ operations" subsection for information on where to use these.
+
+
+MMIO WRITE BARRIER
+------------------
+
+The Linux kernel also has a special barrier for use with memory-mapped I/O
+writes:
+
+ mmiowb();
+
+This is a variation on the mandatory write barrier that causes writes to weakly
+ordered I/O regions to be partially ordered. Its effects may go beyond the
+CPU->Hardware interface and actually affect the hardware at some level.
+
+See the subsection "Locks vs I/O accesses" for more information.
+
+
+===============================
+IMPLICIT KERNEL MEMORY BARRIERS
+===============================
+
+Some of the other functions in the linux kernel imply memory barriers, amongst
+which are locking, scheduling and memory allocation functions.
+
+This specification is a _minimum_ guarantee; any particular architecture may
+provide more substantial guarantees, but these may not be relied upon outside
+of arch specific code.
+
+
+LOCKING FUNCTIONS
+-----------------
+
+The Linux kernel has a number of locking constructs:
+
+ (*) spin locks
+ (*) R/W spin locks
+ (*) mutexes
+ (*) semaphores
+ (*) R/W semaphores
+ (*) RCU
+
+In all cases there are variants on "LOCK" operations and "UNLOCK" operations
+for each construct. These operations all imply certain barriers:
+
+ (1) LOCK operation implication:
+
+ Memory operations issued after the LOCK will be completed after the LOCK
+ operation has completed.
+
+ Memory operations issued before the LOCK may be completed after the LOCK
+ operation has completed.
+
+ (2) UNLOCK operation implication:
+
+ Memory operations issued before the UNLOCK will be completed before the
+ UNLOCK operation has completed.
+
+ Memory operations issued after the UNLOCK may be completed before the
+ UNLOCK operation has completed.
+
+ (3) LOCK vs LOCK implication:
+
+ All LOCK operations issued before another LOCK operation will be completed
+ before that LOCK operation.
+
+ (4) LOCK vs UNLOCK implication:
+
+ All LOCK operations issued before an UNLOCK operation will be completed
+ before the UNLOCK operation.
+
+ All UNLOCK operations issued before a LOCK operation will be completed
+ before the LOCK operation.
+
+ (5) Failed conditional LOCK implication:
+
+ Certain variants of the LOCK operation may fail, either due to being
+ unable to get the lock immediately, or due to receiving an unblocked
+ signal whilst asleep waiting for the lock to become available. Failed
+ locks do not imply any sort of barrier.
+
+Therefore, from (1), (2) and (4) an UNLOCK followed by an unconditional LOCK is
+equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
+
+[!] Note: one of the consequences of LOCKs and UNLOCKs being only one-way
+    barriers is that the effects of instructions outside of a critical section
+    may seep into the inside of the critical section.
+
+Locks and semaphores may not provide any guarantee of ordering on UP compiled
+systems, and so cannot be counted on in such a situation to actually achieve
+anything at all - especially with respect to I/O accesses - unless combined
+with interrupt disabling operations.
+
+See also the section on "Inter-CPU locking barrier effects".
+
+
+As an example, consider the following:
+
+ *A = a;
+ *B = b;
+ LOCK
+ *C = c;
+ *D = d;
+ UNLOCK
+ *E = e;
+ *F = f;
+
+The following sequence of events is acceptable:
+
+ LOCK, {*F,*A}, *E, {*C,*D}, *B, UNLOCK
+
+ [+] Note that {*F,*A} indicates a combined access.
+
+But none of the following are:
+
+ {*F,*A}, *B, LOCK, *C, *D, UNLOCK, *E
+ *A, *B, *C, LOCK, *D, UNLOCK, *E, *F
+ *A, *B, LOCK, *C, UNLOCK, *D, *E, *F
+ *B, LOCK, *C, *D, UNLOCK, {*F,*A}, *E
+
+
+
+INTERRUPT DISABLING FUNCTIONS
+-----------------------------
+
+Functions that disable interrupts (LOCK equivalent) and enable interrupts
+(UNLOCK equivalent) will act as compiler barriers only. So if memory or I/O
+barriers are required in such a situation, they must be provided by some
+other means.
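+
+For example, a barrier has to be supplied explicitly if the ordering matters
+to another CPU or to a device (a sketch only; the variables are invented):
+
+	local_irq_save(flags);
+	shared_status = 1;
+	smp_mb();		/* the IRQ disable itself is only a compiler barrier */
+	shared_seq++;
+	local_irq_restore(flags);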
+
+
+MISCELLANEOUS FUNCTIONS
+-----------------------
+
+Other functions that imply barriers:
+
+ (*) schedule() and similar imply full memory barriers.
+
+ (*) Memory allocation and release functions imply full memory barriers.
+
+
+=================================
+INTER-CPU LOCKING BARRIER EFFECTS
+=================================
+
+On SMP systems locking primitives give a more substantial form of barrier: one
+that does affect memory access ordering on other CPUs, within the context of
+conflict on any particular lock.
+
+
+LOCKS VS MEMORY ACCESSES
+------------------------
+
+Consider the following: the system has a pair of spinlocks (M) and (Q), and
+three CPUs; then should the following sequence of events occur:
+
+ CPU 1 CPU 2
+ =============================== ===============================
+ *A = a; *E = e;
+ LOCK M LOCK Q
+ *B = b; *F = f;
+ *C = c; *G = g;
+ UNLOCK M UNLOCK Q
+ *D = d; *H = h;
+
+Then there is no guarantee as to what order CPU #3 will see the accesses to *A
+through *H occur in, other than the constraints imposed by the separate locks
+on the separate CPUs. It might, for example, see:
+
+ *E, LOCK M, LOCK Q, *G, *C, *F, *A, *B, UNLOCK Q, *D, *H, UNLOCK M
+
+But it won't see any of:
+
+ *B, *C or *D preceding LOCK M
+ *A, *B or *C following UNLOCK M
+ *F, *G or *H preceding LOCK Q
+ *E, *F or *G following UNLOCK Q
+
+
+However, if the following occurs:
+
+ CPU 1 CPU 2
+ =============================== ===============================
+ *A = a;
+ LOCK M [1]
+ *B = b;
+ *C = c;
+ UNLOCK M [1]
+ *D = d; *E = e;
+ LOCK M [2]
+ *F = f;
+ *G = g;
+ UNLOCK M [2]
+ *H = h;
+
+CPU #3 might see:
+
+ *E, LOCK M [1], *C, *B, *A, UNLOCK M [1],
+ LOCK M [2], *H, *F, *G, UNLOCK M [2], *D
+
+But assuming CPU #1 gets the lock first, it won't see any of:
+
+ *B, *C, *D, *F, *G or *H preceding LOCK M [1]
+ *A, *B or *C following UNLOCK M [1]
+ *F, *G or *H preceding LOCK M [2]
+ *A, *B, *C, *E, *F or *G following UNLOCK M [2]
+
+
+LOCKS VS I/O ACCESSES
+---------------------
+
+Under certain circumstances (especially involving NUMA), I/O accesses within
+two spinlocked sections on two different CPUs may be seen as interleaved by the
+PCI bridge, because the PCI bridge does not necessarily participate in the
+cache-coherence protocol, and is therefore incapable of issuing the required
+read memory barriers.
+
+For example:
+
+ CPU 1 CPU 2
+ =============================== ===============================
+	spin_lock(Q);
+	writel(0, ADDR);
+ writel(1, DATA);
+ spin_unlock(Q);
+ spin_lock(Q);
+ writel(4, ADDR);
+ writel(5, DATA);
+ spin_unlock(Q);
+
+may be seen by the PCI bridge as follows:
+
+ STORE *ADDR = 0, STORE *ADDR = 4, STORE *DATA = 1, STORE *DATA = 5
+
+which would probably cause the hardware to malfunction.
+
+
+What is necessary here is to intervene with an mmiowb() before dropping the
+spinlock, for example:
+
+ CPU 1 CPU 2
+ =============================== ===============================
+	spin_lock(Q);
+	writel(0, ADDR);
+ writel(1, DATA);
+ mmiowb();
+ spin_unlock(Q);
+ spin_lock(Q);
+ writel(4, ADDR);
+ writel(5, DATA);
+ mmiowb();
+ spin_unlock(Q);
+
+this will ensure that the two stores issued on CPU #1 appear at the PCI bridge
+before either of the stores issued on CPU #2.
+
+
+Furthermore, following a store by a load to the same device obviates the need
+for an mmiowb(), because the load forces the store to complete before the load
+is performed:
+
+ CPU 1 CPU 2
+ =============================== ===============================
+	spin_lock(Q);
+	writel(0, ADDR);
+ a = readl(DATA);
+ spin_unlock(Q);
+ spin_lock(Q);
+ writel(4, ADDR);
+ b = readl(DATA);
+ spin_unlock(Q);
+
+
+See Documentation/DocBook/deviceiobook.tmpl for more information.
+
+
+=================================
+WHERE ARE MEMORY BARRIERS NEEDED?
+=================================
+
+Under normal operation, memory operation reordering is generally not going to
+be a problem as a single-threaded linear piece of code will still appear to
+work correctly, even if it's in an SMP kernel. There are, however, four
+circumstances in which reordering definitely _could_ be a problem:
+
+ (*) Interprocessor interaction.
+
+ (*) Atomic operations.
+
+ (*) Accessing devices (I/O).
+
+ (*) Interrupts.
+
+
+INTERPROCESSOR INTERACTION
+--------------------------
+
+When there's a system with more than one processor, more than one CPU in the
+system may be working on the same data set at the same time. This can cause
+synchronisation problems, and the usual way of dealing with them is to use
+locks. Locks, however, are quite expensive, and so it may be preferable to
+operate without the use of a lock if at all possible. In such a case
+operations that affect both CPUs may have to be carefully ordered to prevent
+a malfunction.
+
+Consider, for example, the R/W semaphore slow path. Here a waiting process is
+queued on the semaphore, by virtue of it having a piece of its stack linked to
+the semaphore's list of waiting processes:
+
+ struct rw_semaphore {
+ ...
+ spinlock_t lock;
+ struct list_head waiters;
+ };
+
+ struct rwsem_waiter {
+ struct list_head list;
+ struct task_struct *task;
+ };
+
+To wake up a particular waiter, the up_read() or up_write() functions have to:
+
+ (1) read the next pointer from this waiter's record to know where the next
+     waiter record is;
+
+ (2) read the pointer to the waiter's task structure;
+
+ (3) clear the task pointer to tell the waiter it has been given the semaphore;
+
+ (4) call wake_up_process() on the task; and
+
+ (5) release the reference held on the waiter's task struct.
+
+In other words, it has to perform this sequence of events:
+
+ LOAD waiter->list.next;
+ LOAD waiter->task;
+ STORE waiter->task;
+ CALL wakeup
+ RELEASE task
+
+and if any of these steps occur out of order, then the whole thing may
+malfunction.
+
+Once it has queued itself and dropped the semaphore lock, the waiter does not
+get the lock again; it instead just waits for its task pointer to be cleared
+before proceeding. Since the record is on the waiter's stack, this means that
+if the task pointer is cleared _before_ the next pointer in the list is read,
+another CPU might start processing the waiter and might clobber the waiter's
+stack before the up*() function has a chance to read the next pointer.
+
+Consider then what might happen to the above sequence of events:
+
+ CPU 1 CPU 2
+ =============================== ===============================
+ down_xxx()
+ Queue waiter
+ Sleep
+ up_yyy()
+ LOAD waiter->task;
+ STORE waiter->task;
+ Woken up by other event
+ <preempt>
+ Resume processing
+ down_xxx() returns
+ call foo()
+ foo() clobbers *waiter
+ </preempt>
+ LOAD waiter->list.next;
+ --- OOPS ---
+
+This could be dealt with using the semaphore lock, but then the down_xxx()
+function has to needlessly get the spinlock again after being woken up.
+
+The way to deal with this is to insert a general SMP memory barrier:
+
+ LOAD waiter->list.next;
+ LOAD waiter->task;
+ smp_mb();
+ STORE waiter->task;
+ CALL wakeup
+ RELEASE task
+
+In this case, the barrier makes a guarantee that all memory accesses before the
+barrier will appear to happen before all the memory accesses after the barrier
+with respect to the other CPUs on the system. It does _not_ guarantee that all
+the memory accesses before the barrier will be complete by the time the barrier
+instruction itself is complete.
+
+On a UP system - where this wouldn't be a problem - the smp_mb() is just a
+compiler barrier, thus making sure the compiler emits the instructions in the
+right order without actually intervening in the CPU. Since there's only
+one CPU, that CPU's dependency ordering logic will take care of everything
+else.
+
+
+ATOMIC OPERATIONS
+-----------------
+
+Though they are technically interprocessor interaction considerations, atomic
+operations are noted specially as they do _not_ generally imply memory
+barriers. The possible offenders include:
+
+ xchg();
+ cmpxchg();
+ test_and_set_bit();
+ test_and_clear_bit();
+ test_and_change_bit();
+ atomic_cmpxchg();
+ atomic_inc_return();
+ atomic_dec_return();
+ atomic_add_return();
+ atomic_sub_return();
+ atomic_inc_and_test();
+ atomic_dec_and_test();
+ atomic_sub_and_test();
+ atomic_add_negative();
+ atomic_add_unless();
+
+These may be used for such things as implementing LOCK operations or controlling
+the lifetime of objects by decreasing their reference counts. In such cases
+they need preceding memory barriers.
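+
+For instance, dropping what may be the last reference to an object whose
+fields have just been finalised might look like this sketch (the object and
+its fields are invented):
+
+	obj->status = OBJ_DEAD;
+	smp_mb();		/* order the update before the decrement (see above) */
+	if (atomic_dec_and_test(&obj->refcount))
+		kfree(obj);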
+
+The following may also be possible offenders as they may be used as UNLOCK
+operations.
+
+ set_bit();
+ clear_bit();
+ change_bit();
+ atomic_set();
+
+
+The following are a little tricky:
+
+ atomic_add();
+ atomic_sub();
+ atomic_inc();
+ atomic_dec();
+
+If they're used for statistics generation, then they probably don't need memory
+barriers, unless there's a coupling between statistical data.
+
+If they're used for reference counting on an object to control its lifetime,
+they probably don't need memory barriers because either the reference count
+will be adjusted inside a locked section, or the caller will already hold
+sufficient references to make the lock, and thus a memory barrier,
+unnecessary.
+
+If they're used for constructing a lock of some description, then they probably
+do need memory barriers as a lock primitive generally has to do things in a
+specific order.
+
+
+Basically, each usage case has to be carefully considered as to whether memory
+barriers are needed or not. The simplest rule is probably: if the atomic
+operation is protected by a lock, then it does not require a barrier unless
+there's another operation within the critical section with respect to which an
+ordering must be maintained.
+
+See Documentation/atomic_ops.txt for more information.
+
+
+ACCESSING DEVICES
+-----------------
+
+Many devices can be memory mapped, and so appear to the CPU as if they're just
+a set of memory locations. To control such a device, the driver usually has to
+make the right memory accesses in exactly the right order.
+
+However, having a clever CPU or a clever compiler creates a potential problem
+in that the carefully sequenced accesses in the driver code won't reach the
+device in the requisite order if the CPU or the compiler thinks it is more
+efficient to reorder, combine or merge accesses - something that would cause
+the device to malfunction.
+
+Inside of the Linux kernel, I/O should be done through the appropriate accessor
+routines - such as inb() or writel() - which know how to make such accesses
+appropriately sequential. Whilst this, for the most part, renders the explicit
+use of memory barriers unnecessary, there are a couple of situations where they
+might be needed:
+
+ (1) On some systems, I/O stores are not strongly ordered across all CPUs, and
+ so for _all_ general drivers locks should be used and mmiowb() must be
+ issued prior to unlocking the critical section.
+
+ (2) If the accessor functions are used to refer to an I/O memory window with
+ relaxed memory access properties, then _mandatory_ memory barriers are
+ required to enforce ordering.
+
+See Documentation/DocBook/deviceiobook.tmpl for more information.
+
+
+INTERRUPTS
+----------
+
+A driver may be interrupted by its own interrupt service routine, and thus the
+two parts of the driver may interfere with each other's attempts to control or
+access the device.
+
+This may be alleviated - at least in part - by disabling local interrupts (a
+form of locking), such that the critical operations are all contained within
+the interrupt-disabled section in the driver. Whilst the driver's interrupt
+routine is executing, the driver's core may not run on the same CPU, and its
+interrupt is not permitted to happen again until the current interrupt has been
+handled, thus the interrupt handler does not need to lock against that.
+
+However, consider a driver that was talking to an ethernet card that sports an
+address register and a data register. If that driver's core talks to the card
+under interrupt-disablement and then the driver's interrupt handler is invoked:
+
+ LOCAL IRQ DISABLE
+	writew(3, ADDR);
+	writew(y, DATA);
+ LOCAL IRQ ENABLE
+ <interrupt>
+	writew(4, ADDR);
+ q = readw(DATA);
+ </interrupt>
+
+The store to the data register might happen after the second store to the
+address register if ordering rules are sufficiently relaxed:
+
+ STORE *ADDR = 3, STORE *ADDR = 4, STORE *DATA = y, q = LOAD *DATA
+
+
+If ordering rules are relaxed, it must be assumed that accesses done inside an
+interrupt disabled section may leak outside of it and may interleave with
+accesses performed in an interrupt - and vice versa - unless implicit or
+explicit barriers are used.
+
+Normally this won't be a problem because the I/O accesses done inside such
+sections will include synchronous load operations on strictly ordered I/O
+registers that form implicit I/O barriers. If this isn't sufficient then an
+mmiowb() may need to be used explicitly.
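+
+For instance, reusing the hypothetical card above, the driver core might
+follow its writes with a read from a strictly ordered register to flush them
+to the device before interrupts are re-enabled (STATUS is a hypothetical
+register):
+
+	LOCAL IRQ DISABLE
+	writew(3, ADDR);
+	writew(y, DATA);
+	(void) readw(STATUS);	/* flushes the preceding writes to the card */
+	LOCAL IRQ ENABLE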
+
+
+A similar situation may occur between an interrupt routine and two routines
+running on separate CPUs that communicate with each other. If such a case is
+likely, then interrupt-disabling locks should be used to guarantee ordering.
+
+
+==========================
+KERNEL I/O BARRIER EFFECTS
+==========================
+
+When accessing I/O memory, drivers should use the appropriate accessor
+functions:
+
+ (*) inX(), outX():
+
+ These are intended to talk to I/O space rather than memory space, but
+ that's primarily a CPU-specific concept. The i386 and x86_64 processors do
+ indeed have special I/O space access cycles and instructions, but many
+ CPUs don't have such a concept.
+
+     The PCI bus, amongst others, defines an I/O space concept which, on CPUs
+     such as the i386 and x86_64, readily maps to the CPU's concept of I/O
+     space. However, it may also be mapped as a virtual I/O space in the CPU's
+     memory map, particularly on those CPUs that don't support alternate I/O
+     spaces.
+
+ Accesses to this space may be fully synchronous (as on i386), but
+ intermediary bridges (such as the PCI host bridge) may not fully honour
+ that.
+
+ They are guaranteed to be fully ordered with respect to each other.
+
+ They are not guaranteed to be fully ordered with respect to other types of
+ memory and I/O operation.
+
+ (*) readX(), writeX():
+
+ Whether these are guaranteed to be fully ordered and uncombined with
+ respect to each other on the issuing CPU depends on the characteristics
+ defined for the memory window through which they're accessing. On later
+ i386 architecture machines, for example, this is controlled by way of the
+ MTRR registers.
+
+     Ordinarily, these will be guaranteed to be fully ordered and uncombined,
+ provided they're not accessing a prefetchable device.
+
+ However, intermediary hardware (such as a PCI bridge) may indulge in
+ deferral if it so wishes; to flush a store, a load from the same location
+ is preferred[*], but a load from the same device or from configuration
+ space should suffice for PCI.
+
+ [*] NOTE! attempting to load from the same location as was written to may
+ cause a malfunction - consider the 16550 Rx/Tx serial registers for
+ example.
+
+ Used with prefetchable I/O memory, an mmiowb() barrier may be required to
+ force stores to be ordered.
+
+ Please refer to the PCI specification for more information on interactions
+ between PCI transactions.
+
+ (*) readX_relaxed()
+
+ These are similar to readX(), but are not guaranteed to be ordered in any
+ way. Be aware that there is no I/O read barrier available.
+
+ (*) ioreadX(), iowriteX()
+
+ These will perform as appropriate for the type of access they're actually
+ doing, be it inX()/outX() or readX()/writeX().
+
+
+========================================
+ASSUMED MINIMUM EXECUTION ORDERING MODEL
+========================================
+
+It has to be assumed that the conceptual CPU is weakly-ordered but that it will
+maintain the appearance of program causality with respect to itself. Some CPUs
+(such as i386 or x86_64) are more constrained than others (such as powerpc or
+frv), and so the most relaxed case (namely DEC Alpha) must be assumed outside
+of arch-specific code.
+
+This means that it must be considered that the CPU will execute its instruction
+stream in any order it feels like - or even in parallel - provided that if an
+instruction in the stream depends on an earlier instruction, then that
+earlier instruction must be sufficiently complete[*] before the later
+instruction may proceed; in other words: provided that the appearance of
+causality is maintained.
+
+ [*] Some instructions have more than one effect - such as changing the
+ condition codes, changing registers or changing memory - and different
+ instructions may depend on different effects.
+
+A CPU may also discard any instruction sequence that winds up having no
+ultimate effect. For example, if two adjacent instructions both load an
+immediate value into the same register, the first may be discarded.
+
+
+Similarly, it has to be assumed that the compiler might reorder the instruction
+stream in any way it sees fit, again provided the appearance of causality is
+maintained.
+
+
+============================
+THE EFFECTS OF THE CPU CACHE
+============================
+
+The way cached memory operations are perceived across the system is affected to
+a certain extent by the caches that lie between CPUs and memory, and by the
+memory coherence system that maintains the consistency of state in the system.
+
+As far as the way a CPU interacts with another part of the system through the
+caches goes, the memory system has to include the CPU's caches, and memory
+barriers for the most part act at the interface between the CPU and its cache
+(memory barriers logically act on the dotted line in the following diagram):
+
+ <--- CPU ---> : <----------- Memory ----------->
+ :
+ +--------+ +--------+ : +--------+ +-----------+
+ | | | | : | | | | +--------+
+ | CPU | | Memory | : | CPU | | | | |
+ | Core |--->| Access |----->| Cache |<-->| | | |
+ | | | Queue | : | | | |--->| Memory |
+ | | | | : | | | | | |
+ +--------+ +--------+ : +--------+ | | | |
+ : | Cache | +--------+
+ : | Coherency |
+ : | Mechanism | +--------+
+ +--------+ +--------+ : +--------+ | | | |
+ | | | | : | | | | | |
+ | CPU | | Memory | : | CPU | | |--->| Device |
+ | Core |--->| Access |----->| Cache |<-->| | | |
+ | | | Queue | : | | | | | |
+ | | | | : | | | | +--------+
+ +--------+ +--------+ : +--------+ +-----------+
+ :
+ :
+
+Although any particular load or store may not actually appear outside of the
+CPU that issued it since it may have been satisfied within the CPU's own cache,
+it will still appear as if the full memory access had taken place as far as the
+other CPUs are concerned since the cache coherency mechanisms will migrate the
+cacheline over to the accessing CPU and propagate the effects upon conflict.
+
+The CPU core may execute instructions in any order it deems fit, provided the
+expected program causality appears to be maintained. Some of the instructions
+generate load and store operations which then go into the queue of memory
+accesses to be performed. The core may place these in the queue in any order
+it wishes, and continue execution until it is forced to wait for an instruction
+to complete.
+
+What memory barriers are concerned with is controlling the order in which
+accesses cross from the CPU side of things to the memory side of things, and
+the order in which the effects are perceived to happen by the other observers
+in the system.
+
+[!] Memory barriers are _not_ needed within a given CPU, as CPUs always see
+their own loads and stores as if they had happened in program order.
+
+[!] MMIO or other device accesses may bypass the cache system. This depends on
+the properties of the memory window through which devices are accessed and/or
+the use of any special device communication instructions the CPU may have.
+
+
+CACHE COHERENCY
+---------------
+
+Life isn't quite as simple as it may appear above, however: for while the
+caches are expected to be coherent, there's no guarantee that that coherency
+will be ordered. This means that whilst changes made on one CPU will
+eventually become visible on all CPUs, there's no guarantee that they will
+become apparent in the same order on those other CPUs.
+
+
+Consider dealing with a system that has a pair of CPUs (1 & 2), each of which
+has a pair of parallel data caches (CPU 1 has A/B, and CPU 2 has C/D):
+
+ :
+ : +--------+
+ : +---------+ | |
+ +--------+ : +--->| Cache A |<------->| |
+ | | : | +---------+ | |
+ | CPU 1 |<---+ | |
+ | | : | +---------+ | |
+ +--------+ : +--->| Cache B |<------->| |
+ : +---------+ | |
+ : | Memory |
+ : +---------+ | System |
+ +--------+ : +--->| Cache C |<------->| |
+ | | : | +---------+ | |
+ | CPU 2 |<---+ | |
+ | | : | +---------+ | |
+ +--------+ : +--->| Cache D |<------->| |
+ : +---------+ | |
+ : +--------+
+ :
+
+Imagine the system has the following properties:
+
+ (*) an odd-numbered cache line may be in cache A, cache C or it may still be
+ resident in memory;
+
+ (*) an even-numbered cache line may be in cache B, cache D or it may still be
+ resident in memory;
+
+ (*) whilst the CPU core is interrogating one cache, the other cache may be
+ making use of the bus to access the rest of the system - perhaps to
+ displace a dirty cacheline or to do a speculative load;
+
+ (*) each cache has a queue of operations that need to be applied to that cache
+ to maintain coherency with the rest of the system;
+
+ (*) the coherency queue is not flushed by normal loads to lines already
+ present in the cache, even though the contents of the queue may
+     potentially affect those loads.
+
+Imagine, then, that two writes are made on the first CPU, with a write barrier
+between them to guarantee that they will appear to reach that CPU's caches in
+the requisite order:
+
+ CPU 1 CPU 2 COMMENT
+ =============== =============== =======================================
+ u == 0, v == 1 and p == &u, q == &u
+ v = 2;
+ smp_wmb(); Make sure change to v visible before
+ change to p
+ <A:modify v=2> v is now in cache A exclusively
+ p = &v;
+ <B:modify p=&v> p is now in cache B exclusively
+
+The write memory barrier forces the other CPUs in the system to perceive that
+the local CPU's caches have apparently been updated in the correct order. But
+now imagine that the second CPU wants to read those values:
+
+ CPU 1 CPU 2 COMMENT
+ =============== =============== =======================================
+ ...
+ q = p;
+ x = *q;
+
+The above pair of reads may then fail to happen in the expected order, as the
+cacheline holding p may get updated in one of the second CPU's caches whilst
+the update to the cacheline holding v is delayed in the other of the second
+CPU's caches by some other cache event:
+
+ CPU 1 CPU 2 COMMENT
+ =============== =============== =======================================
+ u == 0, v == 1 and p == &u, q == &u
+ v = 2;
+ smp_wmb();
+ <A:modify v=2> <C:busy>
+ <C:queue v=2>
+	p = &v;		q = p;
+ <D:request p>
+ <B:modify p=&v> <D:commit p=&v>
+ <D:read p>
+ x = *q;
+ <C:read *q> Reads from v before v updated in cache
+ <C:unbusy>
+ <C:commit v=2>
+
+Basically, whilst both cachelines will be updated on CPU 2 eventually, there's
+no guarantee that, without intervention, the order of update will be the same
+as that committed on CPU 1.
+
+
+To intervene, we need to interpolate a data dependency barrier or a read
+barrier between the loads. This will force the cache to commit its coherency
+queue before processing any further requests:
+
+ CPU 1 CPU 2 COMMENT
+ =============== =============== =======================================
+ u == 0, v == 1 and p == &u, q == &u
+ v = 2;
+ smp_wmb();
+ <A:modify v=2> <C:busy>
+ <C:queue v=2>
+	p = &v;		q = p;
+ <D:request p>
+ <B:modify p=&v> <D:commit p=&v>
+ <D:read p>
+ smp_read_barrier_depends()
+ <C:unbusy>
+ <C:commit v=2>
+ x = *q;
+ <C:read *q> Reads from v after v updated in cache
+
+
+This sort of problem can be encountered on DEC Alpha processors as they have a
+split cache that improves performance by making better use of the data bus.
+Whilst most CPUs do imply a data dependency barrier on the read when a memory
+access depends on a read, not all do, so it may not be relied on.
+
+Other CPUs may also have split caches, but must coordinate between the various
+cachelets for normal memory accesses. The semantics of the Alpha remove the
+need for such coordination in the absence of memory barriers.
+
+
+CACHE COHERENCY VS DMA
+----------------------
+
+Not all systems maintain cache coherency with respect to devices doing DMA. In
+such cases, a device attempting DMA may obtain stale data from RAM because
+dirty cache lines may be resident in the caches of various CPUs, and may not
+have been written back to RAM yet. To deal with this, the appropriate part of
+the kernel must flush the overlapping bits of cache on each CPU (and maybe
+invalidate them as well).
+
+In addition, the data DMA'd to RAM by a device may be overwritten by dirty
+cache lines being written back to RAM from a CPU's cache after the device has
+installed its own data, or cache lines present in a CPU's cache may simply
+obscure the fact that RAM has been updated, until such time as the cacheline
+is discarded from the CPU's cache and reloaded. To deal with this, the
+appropriate part of the kernel must invalidate the overlapping bits of the
+cache on each CPU.
+
+See Documentation/cachetlb.txt for more information on cache management.
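+
+In practice a driver does not usually do this flushing by hand; the DMA
+mapping API (see Documentation/DMA-API.txt) normally takes care of it on the
+driver's behalf. A minimal sketch, assuming dev, buf and len already exist:
+
+	dma_addr_t handle;
+
+	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+	/* ... point the device at "handle" and let it perform the DMA ... */
+	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);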
+
+
+CACHE COHERENCY VS MMIO
+-----------------------
+
+Memory mapped I/O usually takes place through memory locations that are part
+of a window in the CPU's memory space that has different properties assigned
+than the usual RAM-directed window.
+
+Amongst these properties is usually the fact that such accesses bypass the
+caching entirely and go directly to the device buses. This means MMIO accesses
+may, in effect, overtake accesses to cached memory that were emitted earlier.
+A memory barrier isn't sufficient in such a case, but rather the cache must be
+flushed between the cached memory write and the MMIO access if the two are in
+any way dependent.
+
+
+=========================
+THE THINGS CPUS GET UP TO
+=========================
+
+A programmer might take it for granted that the CPU will perform memory
+operations in exactly the order specified, so that if a CPU is, for example,
+given the following piece of code to execute:
+
+ a = *A;
+ *B = b;
+ c = *C;
+ d = *D;
+ *E = e;
+
+The programmer would then expect that the CPU will complete the memory
+operation for each instruction before moving on to the next one, leading to a
+definite sequence of operations as seen by external observers in the system:
+
+ LOAD *A, STORE *B, LOAD *C, LOAD *D, STORE *E.
+
+
+Reality is, of course, much messier. With many CPUs and compilers, the above
+assumption doesn't hold because:
+
+ (*) loads are more likely to need to be completed immediately to permit
+ execution progress, whereas stores can often be deferred without a
+ problem;
+
+ (*) loads may be done speculatively, and the result discarded should it prove
+ to have been unnecessary;
+
+ (*) loads may be done speculatively, leading to the result having been
+ fetched at the wrong time in the expected sequence of events;
+
+ (*) the order of the memory accesses may be rearranged to promote better use
+ of the CPU buses and caches;
+
+ (*) loads and stores may be combined to improve performance when talking to
+ memory or I/O hardware that can do batched accesses of adjacent locations,
+ thus cutting down on transaction setup costs (memory and PCI devices may
+ both be able to do this); and
+
+ (*) the CPU's data cache may affect the ordering, and whilst cache-coherency
+ mechanisms may alleviate this - once the store has actually hit the cache
+ - there's no guarantee that the coherency management will be propagated in
+ order to other CPUs.
+
+So what another CPU, say, might actually observe from the above piece of code
+is:
+
+ LOAD *A, ..., LOAD {*C,*D}, STORE *E, STORE *B
+
+ (Where "LOAD {*C,*D}" is a combined load)
+
+
+However, it is guaranteed that a CPU will be self-consistent: it will see its
+_own_ accesses appear to be correctly ordered, without the need for a memory
+barrier. For instance with the following code:
+
+ U = *A;
+ *A = V;
+ *A = W;
+ X = *A;
+ *A = Y;
+ Z = *A;
+
+and assuming no intervention by an external influence, it can be assumed that
+the final result will appear to be:
+
+ U == the original value of *A
+ X == W
+ Z == Y
+ *A == Y
+
+The code above may cause the CPU to generate the full sequence of memory
+accesses:
+
+ U=LOAD *A, STORE *A=V, STORE *A=W, X=LOAD *A, STORE *A=Y, Z=LOAD *A
+
+in that order, but, without intervention, the sequence may have almost any
+combination of elements combined or discarded, provided the program's view of
+the world remains consistent.
+
+The compiler may also combine, discard or defer elements of the sequence before
+the CPU even sees them.
+
+For instance:
+
+ *A = V;
+ *A = W;
+
+may be reduced to:
+
+ *A = W;
+
+since, without a write barrier, it can be assumed that the effect of the
+storage of V to *A is lost. Similarly:
+
+ *A = Y;
+ Z = *A;
+
+may, without a memory barrier, be reduced to:
+
+ *A = Y;
+ Z = Y;
+
+and the LOAD operation never appears outside of the CPU.
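+
+Where the "redundant" store or load actually matters, the compiler can be
+prevented from merging or eliding it; purely as a sketch, using the kernel's
+compiler barrier:
+
+	*A = V;
+	barrier();	/* stop the compiler merging the two stores */
+	*A = W;
+
+Note that this constrains only the compiler; the CPU itself may still combine
+the accesses as described above.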
+
+
+AND THEN THERE'S THE ALPHA
+--------------------------
+
+The DEC Alpha CPU is one of the most relaxed CPUs there is. Not only that,
+some versions of the Alpha CPU have a split data cache, permitting them to have
+two semantically related cache lines updated at separate times. This is where
+the data dependency barrier really becomes necessary, as it synchronises both
+caches with the memory coherence system, making it seem that the pointer
+change and the new data it points to become visible in the right order.
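+
+On the reading side this means that code following a pointer published by
+another CPU needs a data dependency barrier between loading the pointer and
+dereferencing it; as a sketch (smp_read_barrier_depends() is a no-op on
+everything other than the Alpha):
+
+	q = global_p;
+	smp_read_barrier_depends();
+	d = q->data;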
+
+The Alpha defines the Linux kernel's memory barrier model.
+
+See the subsection on "Cache Coherency" above.
+
+
+==========
+REFERENCES
+==========
+
+Alpha AXP Architecture Reference Manual, Second Edition (Sites & Witek,
+Digital Press)
+ Chapter 5.2: Physical Address Space Characteristics
+ Chapter 5.4: Caches and Write Buffers
+ Chapter 5.5: Data Sharing
+ Chapter 5.6: Read/Write Ordering
+
+AMD64 Architecture Programmer's Manual Volume 2: System Programming
+ Chapter 7.1: Memory-Access Ordering
+ Chapter 7.4: Buffering and Combining Memory Writes
+
+IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+System Programming Guide
+ Chapter 7.1: Locked Atomic Operations
+ Chapter 7.2: Memory Ordering
+ Chapter 7.4: Serializing Instructions
+
+The SPARC Architecture Manual, Version 9
+ Chapter 8: Memory Models
+ Appendix D: Formal Specification of the Memory Models
+ Appendix J: Programming with the Memory Models
+
+UltraSPARC Programmer Reference Manual
+ Chapter 5: Memory Accesses and Cacheability
+ Chapter 15: Sparc-V9 Memory Models
+
+UltraSPARC III Cu User's Manual
+ Chapter 9: Memory Models
+
+UltraSPARC IIIi Processor User's Manual
+ Chapter 8: Memory Models
+
+UltraSPARC Architecture 2005
+ Chapter 9: Memory
+ Appendix D: Formal Specifications of the Memory Models
+
+UltraSPARC T1 Supplement to the UltraSPARC Architecture 2005
+ Chapter 8: Memory Models
+ Appendix F: Caches and Cache Coherency
+
+Solaris Internals, Core Kernel Architecture, p63-68:
+ Chapter 3.3: Hardware Considerations for Locks and
+ Synchronization
+
+Unix Systems for Modern Architectures, Symmetric Multiprocessing and Caching
+for Kernel Programmers:
+ Chapter 13: Other Memory Models
+
+Intel Itanium Architecture Software Developer's Manual: Volume 1:
+ Section 2.6: Speculation
+ Section 4.4: Memory Access
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 4fc8e98743207..aaf99d5f0dad6 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -254,7 +254,7 @@ and, the number of frames be
<block number> * <block size> / <frame size>
-Suposse the following parameters, which apply for 2.6 kernel and an
+Suppose the following parameters, which apply for 2.6 kernel and an
i386 architecture:
<size-max> = 131072 bytes
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt
index ec3d109d787a4..76750fb9151a1 100644
--- a/Documentation/networking/tuntap.txt
+++ b/Documentation/networking/tuntap.txt
@@ -138,7 +138,7 @@ This means that you have to read/write IP packets when you are using tun and
ethernet frames when using tap.
5. What is the difference between BPF and TUN/TAP driver?
-BFP is an advanced packet filter. It can be attached to existing
+BPF is an advanced packet filter. It can be attached to existing
network interface. It does not provide a virtual network interface.
A TUN/TAP driver does provide a virtual network interface and it is possible
to attach BPF to this interface.
diff --git a/Documentation/pcmcia/driver-changes.txt b/Documentation/pcmcia/driver-changes.txt
index 97420f08c7869..4739c5c3face3 100644
--- a/Documentation/pcmcia/driver-changes.txt
+++ b/Documentation/pcmcia/driver-changes.txt
@@ -1,5 +1,11 @@
This file details changes in 2.6 which affect PCMCIA card driver authors:
+* New release helper (as of 2.6.17)
+ Instead of calling pcmcia_release_{configuration,io,irq,win}, all that's
+ necessary now is calling pcmcia_disable_device. As there is no valid
+ reason left to call pcmcia_release_io and pcmcia_release_irq, the
+ exports for them were removed.
+
* Unify detach and REMOVAL event code, as well as attach and INSERTION
code (as of 2.6.16)
void (*remove) (struct pcmcia_device *dev);
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index 8c71954559633..bca50903233f0 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -52,7 +52,7 @@
51 -> ProVideo PV952 [1540:9524]
52 -> AverMedia AverTV/305 [1461:2108]
53 -> ASUS TV-FM 7135 [1043:4845]
- 54 -> LifeView FlyTV Platinum FM [5168:0214,1489:0214]
+ 54 -> LifeView FlyTV Platinum FM / Gold [5168:0214,1489:0214,5168:0304]
55 -> LifeView FlyDVB-T DUO [5168:0306]
56 -> Avermedia AVerTV 307 [1461:a70a]
57 -> Avermedia AVerTV GO 007 FM [1461:f31f]
@@ -84,7 +84,7 @@
83 -> Terratec Cinergy 250 PCI TV [153b:1160]
84 -> LifeView FlyDVB Trio [5168:0319]
85 -> AverTV DVB-T 777 [1461:2c05]
- 86 -> LifeView FlyDVB-T [5168:0301]
+ 86 -> LifeView FlyDVB-T / Genius VideoWonder DVB-T [5168:0301,1489:0301]
87 -> ADS Instant TV Duo Cardbus PTV331 [0331:1421]
88 -> Tevion/KWorld DVB-T 220RF [17de:7201]
89 -> ELSA EX-VISION 700TV [1048:226c]
@@ -92,3 +92,4 @@
91 -> AVerMedia A169 B [1461:7360]
92 -> AVerMedia A169 B1 [1461:6360]
93 -> Medion 7134 Bridge #2 [16be:0005]
+ 94 -> LifeView FlyDVB-T Hybrid Cardbus [5168:3306,5168:3502]
diff --git a/Documentation/usb/et61x251.txt b/Documentation/video4linux/et61x251.txt
index 29340282ab5f6..29340282ab5f6 100644
--- a/Documentation/usb/et61x251.txt
+++ b/Documentation/video4linux/et61x251.txt
diff --git a/Documentation/usb/ibmcam.txt b/Documentation/video4linux/ibmcam.txt
index c250036441311..4a40a2e99451a 100644
--- a/Documentation/usb/ibmcam.txt
+++ b/Documentation/video4linux/ibmcam.txt
@@ -122,7 +122,7 @@ WHAT YOU NEED:
- A Linux box with USB support (2.3/2.4; 2.2 w/backport may work)
- A Video4Linux compatible frame grabber program such as xawtv.
-
+
HOW TO COMPILE THE DRIVER:
You need to compile the driver only if you are a developer
diff --git a/Documentation/usb/ov511.txt b/Documentation/video4linux/ov511.txt
index a7fc0432bff1a..142741e3c5786 100644
--- a/Documentation/usb/ov511.txt
+++ b/Documentation/video4linux/ov511.txt
@@ -9,7 +9,7 @@ INTRODUCTION:
This is a driver for the OV511, a USB-only chip used in many "webcam" devices.
Any camera using the OV511/OV511+ and the OV6620/OV7610/20/20AE should work.
-Video capture devices that use the Philips SAA7111A decoder also work. It
+Video capture devices that use the Philips SAA7111A decoder also work. It
supports streaming and capture of color or monochrome video via the Video4Linux
API. Most V4L apps are compatible with it. Most resolutions with a width and
height that are a multiple of 8 are supported.
@@ -52,15 +52,15 @@ from it:
chmod 666 /dev/video
chmod 666 /dev/video0 (if necessary)
-
+
Now you are ready to run a video app! Both vidcat and xawtv work well for me
at 640x480.
-
+
[Using vidcat:]
vidcat -s 640x480 -p c > test.jpg
xview test.jpg
-
+
[Using xawtv:]
From the main xawtv directory:
@@ -70,7 +70,7 @@ From the main xawtv directory:
make
make install
-Now you should be able to run xawtv. Right click for the options dialog.
+Now you should be able to run xawtv. Right click for the options dialog.
MODULE PARAMETERS:
@@ -286,4 +286,3 @@ Randy Dunlap, and others. Big thanks to them for their pioneering work on that
and the USB stack. Thanks to Bret Wallach for getting camera reg IO, ISOC, and
image capture working. Thanks to Orion Sky Lawlor, Kevin Moore, and Claudio
Matsuoka for their work as well.
-
diff --git a/Documentation/usb/se401.txt b/Documentation/video4linux/se401.txt
index 7b9d1c960a10f..7b9d1c960a10f 100644
--- a/Documentation/usb/se401.txt
+++ b/Documentation/video4linux/se401.txt
diff --git a/Documentation/usb/sn9c102.txt b/Documentation/video4linux/sn9c102.txt
index b957beae56076..142920bc011fb 100644
--- a/Documentation/usb/sn9c102.txt
+++ b/Documentation/video4linux/sn9c102.txt
@@ -174,7 +174,7 @@ Module parameters are listed below:
-------------------------------------------------------------------------------
Name: video_nr
Type: short array (min = 0, max = 64)
-Syntax: <-1|n[,...]>
+Syntax: <-1|n[,...]>
Description: Specify V4L2 minor mode number:
-1 = use next available
n = use minor number n
@@ -187,7 +187,7 @@ Default: -1
-------------------------------------------------------------------------------
Name: force_munmap
Type: bool array (min = 0, max = 64)
-Syntax: <0|1[,...]>
+Syntax: <0|1[,...]>
Description: Force the application to unmap previously mapped buffer memory
before calling any VIDIOC_S_CROP or VIDIOC_S_FMT ioctl's. Not
all the applications support this feature. This parameter is
@@ -206,7 +206,7 @@ Default: 2
-------------------------------------------------------------------------------
Name: debug
Type: ushort
-Syntax: <n>
+Syntax: <n>
Description: Debugging information level, from 0 to 3:
0 = none (use carefully)
1 = critical errors
@@ -267,7 +267,7 @@ The sysfs interface also provides the "frame_header" entry, which exports the
frame header of the most recent requested and captured video frame. The header
is always 18-bytes long and is appended to every video frame by the SN9C10x
controllers. As an example, this additional information can be used by the user
-application for implementing auto-exposure features via software.
+application for implementing auto-exposure features via software.
The following table describes the frame header:
@@ -441,7 +441,7 @@ blue pixels in one video frame. Each pixel is associated with a 8-bit long
value and is disposed in memory according to the pattern shown below:
B[0] G[1] B[2] G[3] ... B[m-2] G[m-1]
-G[m] R[m+1] G[m+2] R[m+2] ... G[2m-2] R[2m-1]
+G[m] R[m+1] G[m+2] R[m+2] ... G[2m-2] R[2m-1]
...
... B[(n-1)(m-2)] G[(n-1)(m-1)]
... G[n(m-2)] R[n(m-1)]
@@ -472,12 +472,12 @@ The pixel reference value is calculated as follows:
The algorithm purely describes the conversion from compressed Bayer code used
in the SN9C10x chips to uncompressed Bayer. Additional steps are required to
convert this to a color image (i.e. a color interpolation algorithm).
-
+
The following Huffman codes have been found:
-0: +0 (relative to reference pixel value)
+0: +0 (relative to reference pixel value)
100: +4
101: -4?
-1110xxxx: set absolute value to xxxx.0000
+1110xxxx: set absolute value to xxxx.0000
1101: +11
1111: -11
11001: +20
diff --git a/Documentation/usb/stv680.txt b/Documentation/video4linux/stv680.txt
index 6448041e7a37b..4f8946f32f51b 100644
--- a/Documentation/usb/stv680.txt
+++ b/Documentation/video4linux/stv680.txt
@@ -5,15 +5,15 @@ Copyright, 2001, Kevin Sisson
INTRODUCTION:
-STMicroelectronics produces the STV0680B chip, which comes in two
-types, -001 and -003. The -003 version allows the recording and downloading
-of sound clips from the camera, and allows a flash attachment. Otherwise,
-it uses the same commands as the -001 version. Both versions support a
-variety of SDRAM sizes and sensors, allowing for a maximum of 26 VGA or 20
-CIF pictures. The STV0680 supports either a serial or a usb interface, and
+STMicroelectronics produces the STV0680B chip, which comes in two
+types, -001 and -003. The -003 version allows the recording and downloading
+of sound clips from the camera, and allows a flash attachment. Otherwise,
+it uses the same commands as the -001 version. Both versions support a
+variety of SDRAM sizes and sensors, allowing for a maximum of 26 VGA or 20
+CIF pictures. The STV0680 supports either a serial or a usb interface, and
video is possible through the usb interface.
-The following cameras are known to work with this driver, although any
+The following cameras are known to work with this driver, although any
camera with Vendor/Product codes of 0553/0202 should work:
Aiptek Pencam (various models)
@@ -34,15 +34,15 @@ http://www.linux-usb.org
MODULE OPTIONS:
When the driver is compiled as a module, you can set a "swapRGB=1"
-option, if necessary, for those applications that require it
-(such as xawtv). However, the driver should detect and set this
+option, if necessary, for those applications that require it
+(such as xawtv). However, the driver should detect and set this
automatically, so this option should not normally be used.
KNOWN PROBLEMS:
-The driver seems to work better with the usb-ohci than the usb-uhci host
-controller driver.
+The driver seems to work better with the usb-ohci than the usb-uhci host
+controller driver.
HELP:
@@ -50,6 +50,4 @@ The latest info on this driver can be found at:
http://personal.clt.bellsouth.net/~kjsisson or at
http://stv0680-usb.sourceforge.net
-Any questions to me can be send to: kjsisson@bellsouth.net
-
-
+Any questions to me can be send to: kjsisson@bellsouth.net \ No newline at end of file
diff --git a/Documentation/usb/w9968cf.txt b/Documentation/video4linux/w9968cf.txt
index 9d46cd0b19e31..3b704f2aae6dc 100644
--- a/Documentation/usb/w9968cf.txt
+++ b/Documentation/video4linux/w9968cf.txt
@@ -1,5 +1,5 @@
- W996[87]CF JPEG USB Dual Mode Camera Chip
+ W996[87]CF JPEG USB Dual Mode Camera Chip
Driver for Linux 2.6 (basic version)
=========================================
@@ -115,7 +115,7 @@ additional testing and full support, would be much appreciated.
======================
For it to work properly, the driver needs kernel support for Video4Linux, USB
and I2C, and the "ovcamchip" module for the image sensor. Make sure you are not
-actually using any external "ovcamchip" module, given that the W996[87]CF
+actually using any external "ovcamchip" module, given that the W996[87]CF
driver depends on the version of the module present in the official kernels.
The following options of the kernel configuration file must be enabled and
@@ -197,16 +197,16 @@ Note: The kernel must be compiled with the CONFIG_KMOD option
enabled for the 'ovcamchip' module to be loaded and for
this parameter to be present.
-------------------------------------------------------------------------------
-Name: simcams
-Type: int
-Syntax: <n>
+Name: simcams
+Type: int
+Syntax: <n>
Description: Number of cameras allowed to stream simultaneously.
n may vary from 0 to 32.
Default: 32
-------------------------------------------------------------------------------
Name: video_nr
Type: int array (min = 0, max = 32)
-Syntax: <-1|n[,...]>
+Syntax: <-1|n[,...]>
Description: Specify V4L minor mode number.
-1 = use next available
n = use minor number n
@@ -219,7 +219,7 @@ Default: -1
-------------------------------------------------------------------------------
Name: packet_size
Type: int array (min = 0, max = 32)
-Syntax: <n[,...]>
+Syntax: <n[,...]>
Description: Specify the maximum data payload size in bytes for alternate
settings, for each device. n is scaled between 63 and 1023.
Default: 1023
@@ -234,7 +234,7 @@ Default: 2
-------------------------------------------------------------------------------
Name: double_buffer
Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
+Syntax: <0|1[,...]>
Description: Hardware double buffering: 0 disabled, 1 enabled.
It should be enabled if you want smooth video output: if you
obtain out of sync. video, disable it, or try to
@@ -243,13 +243,13 @@ Default: 1 for every device.
-------------------------------------------------------------------------------
Name: clamping
Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
+Syntax: <0|1[,...]>
Description: Video data clamping: 0 disabled, 1 enabled.
Default: 0 for every device.
-------------------------------------------------------------------------------
Name: filter_type
Type: int array (min = 0, max = 32)
-Syntax: <0|1|2[,...]>
+Syntax: <0|1|2[,...]>
Description: Video filter type.
0 none, 1 (1-2-1) 3-tap filter, 2 (2-3-6-3-2) 5-tap filter.
The filter is used to reduce noise and aliasing artifacts
@@ -258,13 +258,13 @@ Default: 0 for every device.
-------------------------------------------------------------------------------
Name: largeview
Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
+Syntax: <0|1[,...]>
Description: Large view: 0 disabled, 1 enabled.
Default: 1 for every device.
-------------------------------------------------------------------------------
Name: upscaling
Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
+Syntax: <0|1[,...]>
Description: Software scaling (for non-compressed video only):
0 disabled, 1 enabled.
Disable it if you have a slow CPU or you don't have enough
@@ -341,8 +341,8 @@ Default: 50 for every device.
-------------------------------------------------------------------------------
Name: bandingfilter
Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
-Description: Banding filter to reduce effects of fluorescent
+Syntax: <0|1[,...]>
+Description: Banding filter to reduce effects of fluorescent
lighting:
0 disabled, 1 enabled.
This filter tries to reduce the pattern of horizontal
@@ -374,7 +374,7 @@ Default: 0 for every device.
-------------------------------------------------------------------------------
Name: monochrome
Type: bool array (min = 0, max = 32)
-Syntax: <0|1[,...]>
+Syntax: <0|1[,...]>
Description: The image sensor is monochrome:
0 = no, 1 = yes
Default: 0 for every device.
@@ -400,19 +400,19 @@ Default: 32768 for every device.
-------------------------------------------------------------------------------
Name: contrast
Type: long array (min = 0, max = 32)
-Syntax: <n[,...]>
+Syntax: <n[,...]>
Description: Set picture contrast (0-65535).
Default: 50000 for every device.
-------------------------------------------------------------------------------
Name: whiteness
Type: long array (min = 0, max = 32)
-Syntax: <n[,...]>
+Syntax: <n[,...]>
Description: Set picture whiteness (0-65535).
Default: 32768 for every device.
-------------------------------------------------------------------------------
Name: debug
Type: int
-Syntax: <n>
+Syntax: <n>
Description: Debugging information level, from 0 to 6:
0 = none (use carefully)
1 = critical errors
diff --git a/Documentation/usb/zc0301.txt b/Documentation/video4linux/zc0301.txt
index f55262c6733b6..f55262c6733b6 100644
--- a/Documentation/usb/zc0301.txt
+++ b/Documentation/video4linux/zc0301.txt
diff --git a/MAINTAINERS b/MAINTAINERS
index c9465811addce..f97657b7e2c71 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1451,6 +1451,12 @@ P: Juanjo Ciarlante
M: jjciarla@raiz.uncu.edu.ar
S: Maintained
+IPATH DRIVER:
+P: Bryan O'Sullivan
+M: support@pathscale.com
+L: openib-general@openib.org
+S: Supported
+
IPX NETWORK LAYER
P: Arnaldo Carvalho de Melo
M: acme@conectiva.com.br
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index 1898ea79d0e29..9d6186d50245c 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -216,8 +216,6 @@ EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(get_wchan);
-
#ifdef CONFIG_ALPHA_IRONGATE
EXPORT_SYMBOL(irongate_ioremap);
EXPORT_SYMBOL(irongate_iounmap);
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index 44866cb26a802..7f6a98455e74e 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -435,7 +435,7 @@ marvel_specify_io7(char *str)
str = pchar;
} while(*str);
- return 0;
+ return 1;
}
__setup("io7=", marvel_specify_io7);
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index dd87696705969..a15e18a00258e 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ioport.h>
+#include <linux/platform_device.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
@@ -1478,3 +1479,20 @@ alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
#endif
return NOTIFY_DONE;
}
+
+static __init int add_pcspkr(void)
+{
+ struct platform_device *pd;
+ int ret;
+
+ pd = platform_device_alloc("pcspkr", -1);
+ if (!pd)
+ return -ENOMEM;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ platform_device_put(pd);
+
+ return ret;
+}
+device_initcall(add_pcspkr);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ba46d779ede77..dc5a9332c9159 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -77,6 +77,14 @@ config FIQ
config ARCH_MTD_XIP
bool
+config VECTORS_BASE
+ hex
+ default 0xffff0000 if MMU
+ default DRAM_BASE if REMAP_VECTORS_TO_RAM
+ default 0x00000000
+ help
+ The base address of exception vectors.
+
source "init/Kconfig"
menu "System Type"
@@ -839,6 +847,8 @@ source "drivers/misc/Kconfig"
source "drivers/mfd/Kconfig"
+source "drivers/leds/Kconfig"
+
source "drivers/media/Kconfig"
source "drivers/video/Kconfig"
diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
new file mode 100644
index 0000000000000..e1574be2ded61
--- /dev/null
+++ b/arch/arm/Kconfig-nommu
@@ -0,0 +1,44 @@
+#
+# Kconfig for uClinux(non-paged MM) depend configurations
+# Hyok S. Choi <hyok.choi@samsung.com>
+#
+
+config SET_MEM_PARAM
+ bool "Set flash/sdram size and base addr"
+ help
+ Say Y to manually set the base addresses and sizes.
+ Otherwise, the default values are assigned.
+
+config DRAM_BASE
+ hex '(S)DRAM Base Address' if SET_MEM_PARAM
+ default 0x00800000
+
+config DRAM_SIZE
+ hex '(S)DRAM SIZE' if SET_MEM_PARAM
+ default 0x00800000
+
+config FLASH_MEM_BASE
+ hex 'FLASH Base Address' if SET_MEM_PARAM
+ default 0x00400000
+
+config FLASH_SIZE
+ hex 'FLASH Size' if SET_MEM_PARAM
+ default 0x00400000
+
+config REMAP_VECTORS_TO_RAM
+ bool 'Install vectors to the beginning of RAM' if DRAM_BASE
+ depends on DRAM_BASE
+ help
+ The kernel needs to change the hardware exception vectors.
+ In nommu mode, the hardware exception vectors are normally
+ placed at address 0x00000000. However, this region may be
+ occupied by read-only memory depending on H/W design.
+
+ If the region contains read-write memory, say 'n' here.
+
+ If your CPU provides a remap facility which allows the exception
+ vectors to be mapped to writable memory, say 'n' here.
+
+ Otherwise, say 'y' here. In this case, the kernel will require
+ external support to redirect the hardware exception vectors to
+ the writable versions located at DRAM_BASE.
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index ce3e804ea0f3d..95a96275f88ae 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -20,6 +20,11 @@ GZFLAGS :=-9
# Select a platform tht is kept up-to-date
KBUILD_DEFCONFIG := versatile_defconfig
+# defines the filename extension depending on the memory management type.
+ifeq ($(CONFIG_MMU),)
+MMUEXT := -nommu
+endif
+
ifeq ($(CONFIG_FRAME_POINTER),y)
CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
endif
@@ -73,7 +78,7 @@ AFLAGS +=$(CFLAGS_ABI) $(arch-y) $(tune-y) -msoft-float
CHECKFLAGS += -D__arm__
#Default value
-head-y := arch/arm/kernel/head.o arch/arm/kernel/init_task.o
+head-y := arch/arm/kernel/head$(MMUEXT).o arch/arm/kernel/init_task.o
textofs-y := 0x00008000
machine-$(CONFIG_ARCH_RPC) := rpc
@@ -133,7 +138,7 @@ else
MACHINE :=
endif
-export TEXT_OFFSET GZFLAGS
+export TEXT_OFFSET GZFLAGS MMUEXT
# Do we have FASTFPE?
FASTFPE :=arch/arm/fastfpe
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 491c7e4c9ac67..b56f5e691d650 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -2,6 +2,7 @@
* linux/arch/arm/boot/compressed/head.S
*
* Copyright (C) 1996-2002 Russell King
+ * Copyright (C) 2004 Hyok S. Choi (MPU support)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -320,6 +321,62 @@ params: ldr r0, =params_phys
cache_on: mov r3, #8 @ cache_on function
b call_cache_fn
+/*
+ * Initialize the highest priority protection region, PR7,
+ * to cover the whole 32-bit address space, cacheable and bufferable.
+ */
+__armv4_mpu_cache_on:
+ mov r0, #0x3f @ 4G, the whole
+ mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
+ mcr p15, 0, r0, c6, c7, 1
+
+ mov r0, #0x80 @ PR7
+ mcr p15, 0, r0, c2, c0, 0 @ D-cache on
+ mcr p15, 0, r0, c2, c0, 1 @ I-cache on
+ mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
+
+ mov r0, #0xc000
+ mcr p15, 0, r0, c5, c0, 1 @ I-access permission
+ mcr p15, 0, r0, c5, c0, 0 @ D-access permission
+
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
+ mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ @ ...I .... ..D. WC.M
+ orr r0, r0, #0x002d @ .... .... ..1. 11.1
+ orr r0, r0, #0x1000 @ ...1 .... .... ....
+
+ mcr p15, 0, r0, c1, c0, 0 @ write control reg
+
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
+ mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
+ mov pc, lr
+
+__armv3_mpu_cache_on:
+ mov r0, #0x3f @ 4G, the whole
+ mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
+
+ mov r0, #0x80 @ PR7
+ mcr p15, 0, r0, c2, c0, 0 @ cache on
+ mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
+
+ mov r0, #0xc000
+ mcr p15, 0, r0, c5, c0, 0 @ access permission
+
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ @ .... .... .... WC.M
+ orr r0, r0, #0x000d @ .... .... .... 11.1
+ mov r0, #0
+ mcr p15, 0, r0, c1, c0, 0 @ write control reg
+
+ mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
+ mov pc, lr
+
__setup_mmu: sub r3, r4, #16384 @ Page directory size
bic r3, r3, #0xff @ Align the pointer
bic r3, r3, #0x3f00
@@ -496,6 +553,18 @@ proc_types:
b __armv4_mmu_cache_off
mov pc, lr
+ .word 0x41007400 @ ARM74x
+ .word 0xff00ff00
+ b __armv3_mpu_cache_on
+ b __armv3_mpu_cache_off
+ b __armv3_mpu_cache_flush
+
+ .word 0x41009400 @ ARM94x
+ .word 0xff00ff00
+ b __armv4_mpu_cache_on
+ b __armv4_mpu_cache_off
+ b __armv4_mpu_cache_flush
+
.word 0x00007000 @ ARM7 IDs
.word 0x0000f000
mov pc, lr
@@ -562,6 +631,24 @@ proc_types:
cache_off: mov r3, #12 @ cache_off function
b call_cache_fn
+__armv4_mpu_cache_off:
+ mrc p15, 0, r0, c1, c0
+ bic r0, r0, #0x000d
+ mcr p15, 0, r0, c1, c0 @ turn MPU and cache off
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ mcr p15, 0, r0, c7, c6, 0 @ flush D-Cache
+ mcr p15, 0, r0, c7, c5, 0 @ flush I-Cache
+ mov pc, lr
+
+__armv3_mpu_cache_off:
+ mrc p15, 0, r0, c1, c0
+ bic r0, r0, #0x000d
+ mcr p15, 0, r0, c1, c0, 0 @ turn MPU and cache off
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
+ mov pc, lr
+
__armv4_mmu_cache_off:
mrc p15, 0, r0, c1, c0
bic r0, r0, #0x000d
@@ -601,6 +688,24 @@ cache_clean_flush:
mov r3, #16
b call_cache_fn
+__armv4_mpu_cache_flush:
+ mov r2, #1
+ mov r3, #0
+ mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
+ mov r1, #7 << 5 @ 8 segments
+1: orr r3, r1, #63 << 26 @ 64 entries
+2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
+ subs r3, r3, #1 << 26
+ bcs 2b @ entries 63 to 0
+ subs r1, r1, #1 << 5
+ bcs 1b @ segments 7 to 0
+
+ teq r2, #0
+ mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov pc, lr
+
+
__armv6_mmu_cache_flush:
mov r1, #0
mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D
@@ -638,6 +743,7 @@ no_cache_id:
mov pc, lr
__armv3_mmu_cache_flush:
+__armv3_mpu_cache_flush:
mov r1, #0
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mov pc, lr
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 978d32e82d397..3cd8c9ee4510f 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/leds.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
@@ -75,6 +76,7 @@ static void sharpsl_battery_thread(void *private_);
struct sharpsl_pm_status sharpsl_pm;
DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
static int get_percentage(int voltage)
@@ -190,10 +192,10 @@ void sharpsl_pm_led(int val)
dev_err(sharpsl_pm.dev, "Charging Error!\n");
} else if (val == SHARPSL_LED_ON) {
dev_dbg(sharpsl_pm.dev, "Charge LED On\n");
-
+ led_trigger_event(sharpsl_charge_led_trigger, LED_FULL);
} else {
dev_dbg(sharpsl_pm.dev, "Charge LED Off\n");
-
+ led_trigger_event(sharpsl_charge_led_trigger, LED_OFF);
}
}
@@ -786,6 +788,8 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev)
init_timer(&sharpsl_pm.chrg_full_timer);
sharpsl_pm.chrg_full_timer.function = sharpsl_chrg_full_timer;
+ led_trigger_register_simple("sharpsl-charge", &sharpsl_charge_led_trigger);
+
sharpsl_pm.machinfo->init();
device_create_file(&pdev->dev, &dev_attr_battery_percentage);
@@ -807,6 +811,8 @@ static int sharpsl_pm_remove(struct platform_device *pdev)
device_remove_file(&pdev->dev, &dev_attr_battery_percentage);
device_remove_file(&pdev->dev, &dev_attr_battery_voltage);
+ led_trigger_unregister_simple(sharpsl_charge_led_trigger);
+
sharpsl_pm.machinfo->exit();
del_timer_sync(&sharpsl_pm.chrg_full_timer);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 355914ffb1924..ab8e600c18c8e 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -666,7 +666,7 @@ __kuser_helper_start:
*
* #define __kernel_dmb() \
* asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
- * : : : "lr","cc" )
+ * : : : "r0", "lr","cc" )
*/
__kuser_memory_barrier: @ 0xffff0fa0
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
new file mode 100644
index 0000000000000..a52da0ddb43d0
--- /dev/null
+++ b/arch/arm/kernel/head-common.S
@@ -0,0 +1,217 @@
+/*
+ * linux/arch/arm/kernel/head-common.S
+ *
+ * Copyright (C) 1994-2002 Russell King
+ * Copyright (c) 2003 ARM Limited
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+ .type __switch_data, %object
+__switch_data:
+ .long __mmap_switched
+ .long __data_loc @ r4
+ .long __data_start @ r5
+ .long __bss_start @ r6
+ .long _end @ r7
+ .long processor_id @ r4
+ .long __machine_arch_type @ r5
+ .long cr_alignment @ r6
+ .long init_thread_union + THREAD_START_SP @ sp
+
+/*
+ * The following fragment of code is executed with the MMU on in MMU mode,
+ * and uses absolute addresses; this is not position independent.
+ *
+ * r0 = cp#15 control register
+ * r1 = machine ID
+ * r9 = processor ID
+ */
+ .type __mmap_switched, %function
+__mmap_switched:
+ adr r3, __switch_data + 4
+
+ ldmia r3!, {r4, r5, r6, r7}
+ cmp r4, r5 @ Copy data segment if needed
+1: cmpne r5, r6
+ ldrne fp, [r4], #4
+ strne fp, [r5], #4
+ bne 1b
+
+ mov fp, #0 @ Clear BSS (and zero fp)
+1: cmp r6, r7
+ strcc fp, [r6],#4
+ bcc 1b
+
+ ldmia r3, {r4, r5, r6, sp}
+ str r9, [r4] @ Save processor ID
+ str r1, [r5] @ Save machine type
+ bic r4, r0, #CR_A @ Clear 'A' bit
+ stmia r6, {r0, r4} @ Save control register values
+ b start_kernel
+
+/*
+ * Exception handling. Something went wrong and we can't proceed. We
+ * ought to tell the user, but since we don't have any guarantee that
+ * we're even running on the right architecture, we do virtually nothing.
+ *
+ * If CONFIG_DEBUG_LL is set we try to print out something about the error
+ * and hope for the best (useful if bootloader fails to pass a proper
+ * machine ID for example).
+ */
+
+ .type __error_p, %function
+__error_p:
+#ifdef CONFIG_DEBUG_LL
+ adr r0, str_p1
+ bl printascii
+ b __error
+str_p1: .asciz "\nError: unrecognized/unsupported processor variant.\n"
+ .align
+#endif
+
+ .type __error_a, %function
+__error_a:
+#ifdef CONFIG_DEBUG_LL
+ mov r4, r1 @ preserve machine ID
+ adr r0, str_a1
+ bl printascii
+ mov r0, r4
+ bl printhex8
+ adr r0, str_a2
+ bl printascii
+ adr r3, 3f
+ ldmia r3, {r4, r5, r6} @ get machine desc list
+ sub r4, r3, r4 @ get offset between virt&phys
+ add r5, r5, r4 @ convert virt addresses to
+ add r6, r6, r4 @ physical address space
+1: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
+ bl printhex8
+ mov r0, #'\t'
+ bl printch
+ ldr r0, [r5, #MACHINFO_NAME] @ get machine name
+ add r0, r0, r4
+ bl printascii
+ mov r0, #'\n'
+ bl printch
+ add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
+ cmp r5, r6
+ blo 1b
+ adr r0, str_a3
+ bl printascii
+ b __error
+str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
+str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
+str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
+ .align
+#endif
+
+ .type __error, %function
+__error:
+#ifdef CONFIG_ARCH_RPC
+/*
+ * Turn the screen red on a error - RiscPC only.
+ */
+ mov r0, #0x02000000
+ mov r3, #0x11
+ orr r3, r3, r3, lsl #8
+ orr r3, r3, r3, lsl #16
+ str r3, [r0], #4
+ str r3, [r0], #4
+ str r3, [r0], #4
+ str r3, [r0], #4
+#endif
+1: mov r0, r0
+ b 1b
+
+
+/*
+ * Read processor ID register (CP#15, CR0), and look up in the linker-built
+ * supported processor list. Note that we can't use the absolute addresses
+ * for the __proc_info lists since we aren't running with the MMU on
+ * (and therefore, we are not in the correct address space). We have to
+ * calculate the offset.
+ *
+ * r9 = cpuid
+ * Returns:
+ * r3, r4, r6 corrupted
+ * r5 = proc_info pointer in physical address space
+ * r9 = cpuid (preserved)
+ */
+ .type __lookup_processor_type, %function
+__lookup_processor_type:
+ adr r3, 3f
+ ldmda r3, {r5 - r7}
+ sub r3, r3, r7 @ get offset between virt&phys
+ add r5, r5, r3 @ convert virt addresses to
+ add r6, r6, r3 @ physical address space
+1: ldmia r5, {r3, r4} @ value, mask
+ and r4, r4, r9 @ mask wanted bits
+ teq r3, r4
+ beq 2f
+ add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list)
+ cmp r5, r6
+ blo 1b
+ mov r5, #0 @ unknown processor
+2: mov pc, lr
+
+/*
+ * This provides a C-API version of the above function.
+ */
+ENTRY(lookup_processor_type)
+ stmfd sp!, {r4 - r7, r9, lr}
+ mov r9, r0
+ bl __lookup_processor_type
+ mov r0, r5
+ ldmfd sp!, {r4 - r7, r9, pc}
+
+/*
+ * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
+ * more information about the __proc_info and __arch_info structures.
+ */
+ .long __proc_info_begin
+ .long __proc_info_end
+3: .long .
+ .long __arch_info_begin
+ .long __arch_info_end
+
+/*
+ * Look up the machine architecture in the linker-built list of architectures.
+ * Note that we can't use the absolute addresses for the __arch_info
+ * lists since we aren't running with the MMU on (and therefore, we are
+ * not in the correct address space). We have to calculate the offset.
+ *
+ * r1 = machine architecture number
+ * Returns:
+ * r3, r4, r6 corrupted
+ * r5 = mach_info pointer in physical address space
+ */
+ .type __lookup_machine_type, %function
+__lookup_machine_type:
+ adr r3, 3b
+ ldmia r3, {r4, r5, r6}
+ sub r3, r3, r4 @ get offset between virt&phys
+ add r5, r5, r3 @ convert virt addresses to
+ add r6, r6, r3 @ physical address space
+1: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type
+ teq r3, r1 @ matches loader number?
+ beq 2f @ found
+ add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
+ cmp r5, r6
+ blo 1b
+ mov r5, #0 @ unknown machine
+2: mov pc, lr
+
+/*
+ * This provides a C-API version of the above function.
+ */
+ENTRY(lookup_machine_type)
+ stmfd sp!, {r4 - r6, lr}
+ mov r1, r0
+ bl __lookup_machine_type
+ mov r0, r5
+ ldmfd sp!, {r4 - r6, pc}
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
new file mode 100644
index 0000000000000..b093ab8738b51
--- /dev/null
+++ b/arch/arm/kernel/head-nommu.S
@@ -0,0 +1,83 @@
+/*
+ * linux/arch/arm/kernel/head-nommu.S
+ *
+ * Copyright (C) 1994-2002 Russell King
+ * Copyright (C) 2003-2006 Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common kernel startup code (non-paged MM)
+ * for 32-bit CPUs which have a process ID register (CP15).
+ *
+ */
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+#include <asm/mach-types.h>
+#include <asm/procinfo.h>
+#include <asm/ptrace.h>
+#include <asm/constants.h>
+#include <asm/system.h>
+
+#define PROCINFO_INITFUNC 12
+
+/*
+ * Kernel startup entry point.
+ * ---------------------------
+ *
+ * This is normally called from the decompressor code. The requirements
+ * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
+ * r1 = machine nr.
+ *
+ * See linux/arch/arm/tools/mach-types for the complete list of machine
+ * numbers for r1.
+ *
+ */
+ __INIT
+ .type stext, %function
+ENTRY(stext)
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
+ @ and irqs disabled
+ mrc p15, 0, r9, c0, c0 @ get processor id
+ bl __lookup_processor_type @ r5=procinfo r9=cpuid
+ movs r10, r5 @ invalid processor (r5=0)?
+ beq __error_p @ yes, error 'p'
+ bl __lookup_machine_type @ r5=machinfo
+ movs r8, r5 @ invalid machine (r5=0)?
+ beq __error_a @ yes, error 'a'
+
+ ldr r13, __switch_data @ address to jump to after
+ @ the initialization is done
+ adr lr, __after_proc_init @ return (PIC) address
+ add pc, r10, #PROCINFO_INITFUNC
+
+/*
+ * Set the Control Register and Read the process ID.
+ */
+ .type __after_proc_init, %function
+__after_proc_init:
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+#ifdef CONFIG_ALIGNMENT_TRAP
+ orr r0, r0, #CR_A
+#else
+ bic r0, r0, #CR_A
+#endif
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+ bic r0, r0, #CR_C
+#endif
+#ifdef CONFIG_CPU_BPREDICT_DISABLE
+ bic r0, r0, #CR_Z
+#endif
+#ifdef CONFIG_CPU_ICACHE_DISABLE
+ bic r0, r0, #CR_I
+#endif
+ mcr p15, 0, r0, c1, c0, 0 @ write control reg
+
+ mov pc, r13 @ clear the BSS and jump
+ @ to start_kernel
+
+#include "head-common.S"
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 53b6901f70a69..04b66a9328ef8 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -102,49 +102,6 @@ ENTRY(stext)
adr lr, __enable_mmu @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC
- .type __switch_data, %object
-__switch_data:
- .long __mmap_switched
- .long __data_loc @ r4
- .long __data_start @ r5
- .long __bss_start @ r6
- .long _end @ r7
- .long processor_id @ r4
- .long __machine_arch_type @ r5
- .long cr_alignment @ r6
- .long init_thread_union + THREAD_START_SP @ sp
-
-/*
- * The following fragment of code is executed with the MMU on, and uses
- * absolute addresses; this is not position independent.
- *
- * r0 = cp#15 control register
- * r1 = machine ID
- * r9 = processor ID
- */
- .type __mmap_switched, %function
-__mmap_switched:
- adr r3, __switch_data + 4
-
- ldmia r3!, {r4, r5, r6, r7}
- cmp r4, r5 @ Copy data segment if needed
-1: cmpne r5, r6
- ldrne fp, [r4], #4
- strne fp, [r5], #4
- bne 1b
-
- mov fp, #0 @ Clear BSS (and zero fp)
-1: cmp r6, r7
- strcc fp, [r6],#4
- bcc 1b
-
- ldmia r3, {r4, r5, r6, sp}
- str r9, [r4] @ Save processor ID
- str r1, [r5] @ Save machine type
- bic r4, r0, #CR_A @ Clear 'A' bit
- stmia r6, {r0, r4} @ Save control register values
- b start_kernel
-
#if defined(CONFIG_SMP)
.type secondary_startup, #function
ENTRY(secondary_startup)
@@ -367,166 +324,4 @@ __create_page_tables:
mov pc, lr
.ltorg
-
-
-/*
- * Exception handling. Something went wrong and we can't proceed. We
- * ought to tell the user, but since we don't have any guarantee that
- * we're even running on the right architecture, we do virtually nothing.
- *
- * If CONFIG_DEBUG_LL is set we try to print out something about the error
- * and hope for the best (useful if bootloader fails to pass a proper
- * machine ID for example).
- */
-
- .type __error_p, %function
-__error_p:
-#ifdef CONFIG_DEBUG_LL
- adr r0, str_p1
- bl printascii
- b __error
-str_p1: .asciz "\nError: unrecognized/unsupported processor variant.\n"
- .align
-#endif
-
- .type __error_a, %function
-__error_a:
-#ifdef CONFIG_DEBUG_LL
- mov r4, r1 @ preserve machine ID
- adr r0, str_a1
- bl printascii
- mov r0, r4
- bl printhex8
- adr r0, str_a2
- bl printascii
- adr r3, 3f
- ldmia r3, {r4, r5, r6} @ get machine desc list
- sub r4, r3, r4 @ get offset between virt&phys
- add r5, r5, r4 @ convert virt addresses to
- add r6, r6, r4 @ physical address space
-1: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
- bl printhex8
- mov r0, #'\t'
- bl printch
- ldr r0, [r5, #MACHINFO_NAME] @ get machine name
- add r0, r0, r4
- bl printascii
- mov r0, #'\n'
- bl printch
- add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
- cmp r5, r6
- blo 1b
- adr r0, str_a3
- bl printascii
- b __error
-str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
-str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
-str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
- .align
-#endif
-
- .type __error, %function
-__error:
-#ifdef CONFIG_ARCH_RPC
-/*
- * Turn the screen red on a error - RiscPC only.
- */
- mov r0, #0x02000000
- mov r3, #0x11
- orr r3, r3, r3, lsl #8
- orr r3, r3, r3, lsl #16
- str r3, [r0], #4
- str r3, [r0], #4
- str r3, [r0], #4
- str r3, [r0], #4
-#endif
-1: mov r0, r0
- b 1b
-
-
-/*
- * Read processor ID register (CP#15, CR0), and look up in the linker-built
- * supported processor list. Note that we can't use the absolute addresses
- * for the __proc_info lists since we aren't running with the MMU on
- * (and therefore, we are not in the correct address space). We have to
- * calculate the offset.
- *
- * r9 = cpuid
- * Returns:
- * r3, r4, r6 corrupted
- * r5 = proc_info pointer in physical address space
- * r9 = cpuid (preserved)
- */
- .type __lookup_processor_type, %function
-__lookup_processor_type:
- adr r3, 3f
- ldmda r3, {r5 - r7}
- sub r3, r3, r7 @ get offset between virt&phys
- add r5, r5, r3 @ convert virt addresses to
- add r6, r6, r3 @ physical address space
-1: ldmia r5, {r3, r4} @ value, mask
- and r4, r4, r9 @ mask wanted bits
- teq r3, r4
- beq 2f
- add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list)
- cmp r5, r6
- blo 1b
- mov r5, #0 @ unknown processor
-2: mov pc, lr
-
-/*
- * This provides a C-API version of the above function.
- */
-ENTRY(lookup_processor_type)
- stmfd sp!, {r4 - r7, r9, lr}
- mov r9, r0
- bl __lookup_processor_type
- mov r0, r5
- ldmfd sp!, {r4 - r7, r9, pc}
-
-/*
- * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
- * more information about the __proc_info and __arch_info structures.
- */
- .long __proc_info_begin
- .long __proc_info_end
-3: .long .
- .long __arch_info_begin
- .long __arch_info_end
-
-/*
- * Lookup machine architecture in the linker-build list of architectures.
- * Note that we can't use the absolute addresses for the __arch_info
- * lists since we aren't running with the MMU on (and therefore, we are
- * not in the correct address space). We have to calculate the offset.
- *
- * r1 = machine architecture number
- * Returns:
- * r3, r4, r6 corrupted
- * r5 = mach_info pointer in physical address space
- */
- .type __lookup_machine_type, %function
-__lookup_machine_type:
- adr r3, 3b
- ldmia r3, {r4, r5, r6}
- sub r3, r3, r4 @ get offset between virt&phys
- add r5, r5, r3 @ convert virt addresses to
- add r6, r6, r3 @ physical address space
-1: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type
- teq r3, r1 @ matches loader number?
- beq 2f @ found
- add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
- cmp r5, r6
- blo 1b
- mov r5, #0 @ unknown machine
-2: mov pc, lr
-
-/*
- * This provides a C-API version of the above function.
- */
-ENTRY(lookup_machine_type)
- stmfd sp!, {r4 - r6, lr}
- mov r1, r0
- bl __lookup_machine_type
- mov r0, r5
- ldmfd sp!, {r4 - r6, pc}
+#include "head-common.S"
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 489c069e5c3e8..1ff75cee4b0dd 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -474,4 +474,3 @@ unsigned long get_wchan(struct task_struct *p)
} while (count ++ < 16);
return 0;
}
-EXPORT_SYMBOL(get_wchan);
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 9991049c522d6..27beece155026 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -7,6 +7,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#define KERN_SIGRETURN_CODE 0xffff0500
+#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d566d5f4574d0..35230a0601081 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -688,6 +688,7 @@ EXPORT_SYMBOL(abort);
void __init trap_init(void)
{
+ unsigned long vectors = CONFIG_VECTORS_BASE;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
extern char __kuser_helper_start[], __kuser_helper_end[];
@@ -698,9 +699,9 @@ void __init trap_init(void)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
- memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start);
- memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start);
- memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+ memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
+ memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
+ memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
/*
* Copy signal return handlers into the vector page, and
@@ -709,6 +710,6 @@ void __init trap_init(void)
memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
sizeof(sigreturn_codes));
- flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
+ flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 68923b1d2b62b..d6d7260363613 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -141,6 +141,8 @@ struct corgissp_machinfo corgi_ssp_machinfo = {
*/
static struct corgibl_machinfo corgi_bl_machinfo = {
.max_intensity = 0x2f,
+ .default_intensity = 0x1f,
+ .limit_mask = 0x0b,
.set_bl_intensity = corgi_bl_set_intensity,
};
@@ -164,6 +166,14 @@ static struct platform_device corgikbd_device = {
/*
+ * Corgi LEDs
+ */
+static struct platform_device corgiled_device = {
+ .name = "corgi-led",
+ .id = -1,
+};
+
+/*
* Corgi Touch Screen Device
*/
static struct resource corgits_resources[] = {
@@ -297,6 +307,7 @@ static struct platform_device *devices[] __initdata = {
&corgikbd_device,
&corgibl_device,
&corgits_device,
+ &corgiled_device,
};
static void __init corgi_init(void)
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 0dbb079ecd251..19b372df544a0 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -220,6 +220,8 @@ struct corgissp_machinfo spitz_ssp_machinfo = {
* Spitz Backlight Device
*/
static struct corgibl_machinfo spitz_bl_machinfo = {
+ .default_intensity = 0x1f,
+ .limit_mask = 0x0b,
.max_intensity = 0x2f,
};
@@ -242,6 +244,14 @@ static struct platform_device spitzkbd_device = {
/*
+ * Spitz LEDs
+ */
+static struct platform_device spitzled_device = {
+ .name = "spitz-led",
+ .id = -1,
+};
+
+/*
* Spitz Touch Screen Device
*/
static struct resource spitzts_resources[] = {
@@ -418,6 +428,7 @@ static struct platform_device *devices[] __initdata = {
&spitzkbd_device,
&spitzts_device,
&spitzbl_device,
+ &spitzled_device,
};
static void __init common_init(void)
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 66ec71756d0f2..76c0e7f0a219c 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -251,10 +251,19 @@ static struct platform_device tosakbd_device = {
.id = -1,
};
+/*
+ * Tosa LEDs
+ */
+static struct platform_device tosaled_device = {
+ .name = "tosa-led",
+ .id = -1,
+};
+
static struct platform_device *devices[] __initdata = {
&tosascoop_device,
&tosascoop_jc_device,
&tosakbd_device,
+ &tosaled_device,
};
static void __init tosa_init(void)
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index f90513e9af0c3..b9dfce57c272c 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -30,6 +30,7 @@
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm26/kernel/armksyms.c b/arch/arm26/kernel/armksyms.c
index 811a6376c6243..a6a1b33734440 100644
--- a/arch/arm26/kernel/armksyms.c
+++ b/arch/arm26/kernel/armksyms.c
@@ -212,8 +212,6 @@ EXPORT_SYMBOL(sys_open);
EXPORT_SYMBOL(sys_exit);
EXPORT_SYMBOL(sys_wait4);
-EXPORT_SYMBOL(get_wchan);
-
#ifdef CONFIG_PREEMPT
EXPORT_SYMBOL(kernel_flag);
#endif
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index aa6b7d0a21093..07c8ffa0dd391 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -79,8 +79,6 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__outsl_ns);
EXPORT_SYMBOL(__insl_ns);
-EXPORT_SYMBOL(get_wchan);
-
#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
EXPORT_SYMBOL(atomic_test_and_OR_mask);
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
index 69d6ad32d56c8..b6cd78c972bb4 100644
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -55,8 +55,6 @@ EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(get_wchan);
-
/*
* libgcc functions - functions that are used internally by the
* compiler... (prototypes are not correct though, but that
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index eb5279d23b7f7..6273bf74c2031 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -415,6 +415,7 @@ void __init init_bsp_APIC(void)
void __devinit setup_local_APIC(void)
{
unsigned long oldvalue, value, ver, maxlvt;
+ int i, j;
/* Pound the ESR really hard over the head with a big hammer - mbligh */
if (esr_disable) {
@@ -452,6 +453,25 @@ void __devinit setup_local_APIC(void)
apic_write_around(APIC_TASKPRI, value);
/*
+ * After a crash, we no longer service the interrupts and a pending
+ * interrupt from previous kernel might still have ISR bit set.
+ *
+ * Most probably by now the CPU has serviced that pending interrupt and
+ * it might not have done the ack_APIC_irq() because it thought the
+ * interrupt came from i8259 as ExtInt. The LAPIC did not get an EOI, so it
+ * does not clear the ISR bit and the CPU thinks it has already serviced
+ * the interrupt. Hence a vector might get locked. It was noticed
+ * for the timer irq (vector 0x31). Issue an extra EOI to clear the ISR.
+ */
+ for (i = APIC_ISR_NR - 1; i >= 0; i--) {
+ value = apic_read(APIC_ISR + i*0x10);
+ for (j = 31; j >= 0; j--) {
+ if (value & (1<<j))
+ ack_APIC_irq();
+ }
+ }
+
+ /*
* Now that we are all set up, enable the APIC
*/
value = apic_read(APIC_SPIV);
@@ -732,7 +752,7 @@ static int __init apic_set_verbosity(char *str)
printk(KERN_WARNING "APIC Verbosity level %s not recognised"
" use apic=verbose or apic=debug\n", str);
- return 0;
+ return 1;
}
__setup("apic=", apic_set_verbosity);
diff --git a/arch/i386/kernel/cpu/mcheck/mce.c b/arch/i386/kernel/cpu/mcheck/mce.c
index 6170af3c271ae..afa0888f9a1e8 100644
--- a/arch/i386/kernel/cpu/mcheck/mce.c
+++ b/arch/i386/kernel/cpu/mcheck/mce.c
@@ -64,13 +64,13 @@ void mcheck_init(struct cpuinfo_x86 *c)
static int __init mcheck_disable(char *str)
{
mce_disabled = 1;
- return 0;
+ return 1;
}
static int __init mcheck_enable(char *str)
{
mce_disabled = -1;
- return 0;
+ return 1;
}
__setup("nomce", mcheck_disable);
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index e3c5fca0aa8ad..2b0cfce24a619 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -69,7 +69,7 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
* for the data I pass, and I need tags
* on the data to indicate what information I have
* squirrelled away. ELF notes happen to provide
- * all of that that no need to invent something new.
+ * all of that, so there is no need to invent something new.
*/
buf = (u32*)per_cpu_ptr(crash_notes, cpu);
if (!buf)
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 3b329af4afc53..f8f132aa54725 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -644,7 +644,7 @@ failed:
int __init irqbalance_disable(char *str)
{
irqbalance_disabled = 1;
- return 0;
+ return 1;
}
__setup("noirqbalance", irqbalance_disable);
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 24b3e745478b9..6259afea46d17 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -781,7 +781,6 @@ unsigned long get_wchan(struct task_struct *p)
} while (count++ < 16);
return 0;
}
-EXPORT_SYMBOL(get_wchan);
/*
* sys_alloc_thread_area: get a yet unused TLS descriptor index.
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 8c08660b4e5dd..eacc3f0a2ea4d 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -34,6 +34,7 @@
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
+#include <linux/platform_device.h>
#include <linux/console.h>
#include <linux/mca.h>
#include <linux/root_dev.h>
@@ -1547,6 +1548,23 @@ void __init setup_arch(char **cmdline_p)
#endif
}
+static __init int add_pcspkr(void)
+{
+ struct platform_device *pd;
+ int ret;
+
+ pd = platform_device_alloc("pcspkr", -1);
+ if (!pd)
+ return -ENOMEM;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ platform_device_put(pd);
+
+ return ret;
+}
+device_initcall(add_pcspkr);
+
#include "setup_arch_post.h"
/*
* Local Variables:
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index ce3ef4fa05519..4f58b9c0efe3c 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -313,3 +313,4 @@ ENTRY(sys_call_table)
.long sys_set_robust_list
.long sys_get_robust_list
.long sys_splice
+ .long sys_sync_file_range
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 6b63a5aa1e46f..e38527994590f 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -1193,6 +1193,6 @@ void __init trap_init(void)
static int __init kstack_setup(char *s)
{
kstack_depth_to_print = simple_strtoul(s, NULL, 0);
- return 0;
+ return 1;
}
__setup("kstack=", kstack_setup);
diff --git a/arch/i386/kernel/vsyscall-sigreturn.S b/arch/i386/kernel/vsyscall-sigreturn.S
index fadb5bc3c3743..a92262f416599 100644
--- a/arch/i386/kernel/vsyscall-sigreturn.S
+++ b/arch/i386/kernel/vsyscall-sigreturn.S
@@ -44,7 +44,7 @@ __kernel_rt_sigreturn:
.LSTARTCIEDLSI1:
.long 0 /* CIE ID */
.byte 1 /* Version number */
- .string "zR" /* NUL-terminated augmentation string */
+ .string "zRS" /* NUL-terminated augmentation string */
.uleb128 1 /* Code alignment factor */
.sleb128 -4 /* Data alignment factor */
.byte 8 /* Return address register column */
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 89faa603c6be2..6386f63c413e6 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -240,7 +240,7 @@ cache_info(char *page)
}
p += sprintf(p,
"%s Cache level %lu:\n"
- "\tSize : %lu bytes\n"
+ "\tSize : %u bytes\n"
"\tAttributes : ",
cache_types[j+cci.pcci_unified], i+1,
cci.pcci_cache_size);
@@ -648,9 +648,9 @@ frequency_info(char *page)
if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
p += sprintf(p,
- "Processor/Clock ratio : %ld/%ld\n"
- "Bus/Clock ratio : %ld/%ld\n"
- "ITC/Clock ratio : %ld/%ld\n",
+ "Processor/Clock ratio : %d/%d\n"
+ "Bus/Clock ratio : %d/%d\n"
+ "ITC/Clock ratio : %d/%d\n",
proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
return p - page;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index ac167436e9364..49958904045b2 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -188,7 +188,7 @@ ia64_init_itm (void)
itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
- printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
+ printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
"ITC freq=%lu.%03luMHz", smp_processor_id(),
platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 3b6fd798c4d68..b47476d655f13 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -9,6 +9,8 @@
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
* Populate cpu entries in sysfs for non-numa systems as well
* Intel Corporation - Ashok Raj
+ * 02/27/2006 Zhang, Yanmin
+ * Populate cpu cache entries in sysfs for cpu cache info
*/
#include <linux/config.h>
@@ -19,6 +21,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
+#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>
@@ -101,3 +104,367 @@ out:
}
subsys_initcall(topology_init);
+
+
+/*
+ * Export cpu cache information through sysfs
+ */
+
+/*
+ * A bunch of string arrays to get pretty printing
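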
+ */
+static const char *cache_types[] = {
+ "", /* not used */
+ "Instruction",
+ "Data",
+ "Unified" /* unified */
+};
+
+static const char *cache_mattrib[]={
+ "WriteThrough",
+ "WriteBack",
+ "", /* reserved */
+ "" /* reserved */
+};
+
+struct cache_info {
+ pal_cache_config_info_t cci;
+ cpumask_t shared_cpu_map;
+ int level;
+ int type;
+ struct kobject kobj;
+};
+
+struct cpu_cache_info {
+ struct cache_info *cache_leaves;
+ int num_cache_leaves;
+ struct kobject kobj;
+};
+
+static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
+#define LEAF_KOBJECT_PTR(x,y) (&all_cpu_cache_info[x].cache_leaves[y])
+
+#ifdef CONFIG_SMP
+static void cache_shared_cpu_map_setup( unsigned int cpu,
+ struct cache_info * this_leaf)
+{
+ pal_cache_shared_info_t csi;
+ int num_shared, i = 0;
+ unsigned int j;
+
+ if (cpu_data(cpu)->threads_per_core <= 1 &&
+ cpu_data(cpu)->cores_per_socket <= 1) {
+ cpu_set(cpu, this_leaf->shared_cpu_map);
+ return;
+ }
+
+ if (ia64_pal_cache_shared_info(this_leaf->level,
+ this_leaf->type,
+ 0,
+ &csi) != PAL_STATUS_SUCCESS)
+ return;
+
+ num_shared = (int) csi.num_shared;
+ do {
+ for_each_cpu(j)
+ if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
+ && cpu_data(j)->core_id == csi.log1_cid
+ && cpu_data(j)->thread_id == csi.log1_tid)
+ cpu_set(j, this_leaf->shared_cpu_map);
+
+ i++;
+ } while (i < num_shared &&
+ ia64_pal_cache_shared_info(this_leaf->level,
+ this_leaf->type,
+ i,
+ &csi) == PAL_STATUS_SUCCESS);
+}
+#else
+static void cache_shared_cpu_map_setup(unsigned int cpu,
+ struct cache_info * this_leaf)
+{
+ cpu_set(cpu, this_leaf->shared_cpu_map);
+ return;
+}
+#endif
+
+static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
+}
+
+static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
+}
+
+static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
+{
+ return sprintf(buf,
+ "%s\n",
+ cache_mattrib[this_leaf->cci.pcci_cache_attr]);
+}
+
+static ssize_t show_size(struct cache_info *this_leaf, char *buf)
+{
+ return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
+}
+
+static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
+{
+ unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
+ number_of_sets /= this_leaf->cci.pcci_assoc;
+ number_of_sets /= 1 << this_leaf->cci.pcci_line_size;
+
+ return sprintf(buf, "%u\n", number_of_sets);
+}
+
+static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
+{
+ ssize_t len;
+ cpumask_t shared_cpu_map;
+
+ cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
+ len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
+ len += sprintf(buf+len, "\n");
+ return len;
+}
+
+static ssize_t show_type(struct cache_info *this_leaf, char *buf)
+{
+ int type = this_leaf->type + this_leaf->cci.pcci_unified;
+ return sprintf(buf, "%s\n", cache_types[type]);
+}
+
+static ssize_t show_level(struct cache_info *this_leaf, char *buf)
+{
+ return sprintf(buf, "%u\n", this_leaf->level);
+}
+
+struct cache_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cache_info *, char *);
+ ssize_t (*store)(struct cache_info *, const char *, size_t count);
+};
+
+#ifdef define_one_ro
+ #undef define_one_ro
+#endif
+#define define_one_ro(_name) \
+ static struct cache_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+define_one_ro(level);
+define_one_ro(type);
+define_one_ro(coherency_line_size);
+define_one_ro(ways_of_associativity);
+define_one_ro(size);
+define_one_ro(number_of_sets);
+define_one_ro(shared_cpu_map);
+define_one_ro(attributes);
+
+static struct attribute * cache_default_attrs[] = {
+ &type.attr,
+ &level.attr,
+ &coherency_line_size.attr,
+ &ways_of_associativity.attr,
+ &attributes.attr,
+ &size.attr,
+ &number_of_sets.attr,
+ &shared_cpu_map.attr,
+ NULL
+};
+
+#define to_object(k) container_of(k, struct cache_info, kobj)
+#define to_attr(a) container_of(a, struct cache_attr, attr)
+
+static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
+{
+ struct cache_attr *fattr = to_attr(attr);
+ struct cache_info *this_leaf = to_object(kobj);
+ ssize_t ret;
+
+ ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
+ return ret;
+}
+
+static struct sysfs_ops cache_sysfs_ops = {
+ .show = cache_show
+};
+
+static struct kobj_type cache_ktype = {
+ .sysfs_ops = &cache_sysfs_ops,
+ .default_attrs = cache_default_attrs,
+};
+
+static struct kobj_type cache_ktype_percpu_entry = {
+ .sysfs_ops = &cache_sysfs_ops,
+};
+
+static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
+{
+ if (all_cpu_cache_info[cpu].cache_leaves) {
+ kfree(all_cpu_cache_info[cpu].cache_leaves);
+ all_cpu_cache_info[cpu].cache_leaves = NULL;
+ }
+ all_cpu_cache_info[cpu].num_cache_leaves = 0;
+ memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
+
+ return;
+}
+
+static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
+{
+ u64 i, levels, unique_caches;
+ pal_cache_config_info_t cci;
+ int j;
+ s64 status;
+ struct cache_info *this_cache;
+ int num_cache_leaves = 0;
+
+ if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
+ printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
+ return -1;
+ }
+
+ this_cache=kzalloc(sizeof(struct cache_info)*unique_caches,
+ GFP_KERNEL);
+ if (this_cache == NULL)
+ return -ENOMEM;
+
+ for (i=0; i < levels; i++) {
+ for (j=2; j >0 ; j--) {
+ if ((status=ia64_pal_cache_config_info(i,j, &cci)) !=
+ PAL_STATUS_SUCCESS)
+ continue;
+
+ this_cache[num_cache_leaves].cci = cci;
+ this_cache[num_cache_leaves].level = i + 1;
+ this_cache[num_cache_leaves].type = j;
+
+ cache_shared_cpu_map_setup(cpu,
+ &this_cache[num_cache_leaves]);
+ num_cache_leaves ++;
+ }
+ }
+
+ all_cpu_cache_info[cpu].cache_leaves = this_cache;
+ all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;
+
+ memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
+
+ return 0;
+}
+
+/* Add cache interface for CPU device */
+static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+{
+ unsigned int cpu = sys_dev->id;
+ unsigned long i, j;
+ struct cache_info *this_object;
+ int retval = 0;
+ cpumask_t oldmask;
+
+ if (all_cpu_cache_info[cpu].kobj.parent)
+ return 0;
+
+ oldmask = current->cpus_allowed;
+ retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ if (unlikely(retval))
+ return retval;
+
+ retval = cpu_cache_sysfs_init(cpu);
+ set_cpus_allowed(current, oldmask);
+ if (unlikely(retval < 0))
+ return retval;
+
+ all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
+ kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
+ all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
+ retval = kobject_register(&all_cpu_cache_info[cpu].kobj);
+
+ for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
+ this_object = LEAF_KOBJECT_PTR(cpu,i);
+ this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
+ kobject_set_name(&(this_object->kobj), "index%1lu", i);
+ this_object->kobj.ktype = &cache_ktype;
+ retval = kobject_register(&(this_object->kobj));
+ if (unlikely(retval)) {
+ for (j = 0; j < i; j++) {
+ kobject_unregister(
+ &(LEAF_KOBJECT_PTR(cpu,j)->kobj));
+ }
+ kobject_unregister(&all_cpu_cache_info[cpu].kobj);
+ cpu_cache_sysfs_exit(cpu);
+ break;
+ }
+ }
+ return retval;
+}
+
+/* Remove cache interface for CPU device */
+static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
+{
+ unsigned int cpu = sys_dev->id;
+ unsigned long i;
+
+ for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
+ kobject_unregister(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));
+
+ if (all_cpu_cache_info[cpu].kobj.parent) {
+ kobject_unregister(&all_cpu_cache_info[cpu].kobj);
+ memset(&all_cpu_cache_info[cpu].kobj,
+ 0,
+ sizeof(struct kobject));
+ }
+
+ cpu_cache_sysfs_exit(cpu);
+
+ return 0;
+}
+
+/*
+ * When a cpu is hot-plugged, do a check and initiate
+ * cache kobject if necessary
+ */
+static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct sys_device *sys_dev;
+
+ sys_dev = get_cpu_sysdev(cpu);
+ switch (action) {
+ case CPU_ONLINE:
+ cache_add_dev(sys_dev);
+ break;
+ case CPU_DEAD:
+ cache_remove_dev(sys_dev);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cache_cpu_notifier =
+{
+ .notifier_call = cache_cpu_callback
+};
+
+static int __cpuinit cache_sysfs_init(void)
+{
+ int i;
+
+ for_each_online_cpu(i) {
+ cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
+ (void *)(long)i);
+ }
+
+ register_cpu_notifier(&cache_cpu_notifier);
+
+ return 0;
+}
+
+device_initcall(cache_sysfs_init);
+
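Assuming the usual sysdev layout for CPUs (/sys/devices/system/cpu/cpuN), the kobjects registered above should show up as cpuN/cache/indexM directories carrying the attributes from cache_default_attrs (type, level, size, shared_cpu_map, and so on). A small userspace sketch that reads one leaf; the path is an assumption derived from the kobject names in this patch:

#include <stdio.h>

int main(void)
{
	char buf[64];
	/* "cache" and "index%lu" come from the kobject names registered above. */
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cache/index0/size", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("cache leaf 0 size: %s", buf);
	fclose(f);
	return 0;
}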
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 3d7f2000b7146..c3319514a85ed 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -79,4 +79,3 @@ EXPORT_SYMBOL(__down_failed_interruptible);
EXPORT_SYMBOL(__down_failed_trylock);
EXPORT_SYMBOL(__up_wakeup);
-EXPORT_SYMBOL(get_wchan);
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index d844c755945a2..f9b4ea16c0998 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -57,8 +57,6 @@ EXPORT_SYMBOL(__down_failed_interruptible);
EXPORT_SYMBOL(__down_failed_trylock);
EXPORT_SYMBOL(__up_wakeup);
-EXPORT_SYMBOL(get_wchan);
-
/*
* libgcc functions - functions that are used internally by the
* compiler... (prototypes are not correct though, but that
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5080ea1799a43..e15709ce88662 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -233,6 +233,7 @@ config MACH_JAZZ
select ARC32
select ARCH_MAY_HAVE_PC_FDC
select GENERIC_ISA_DMA
+ select I8253
select I8259
select ISA
select SYS_HAS_CPU_R4X00
@@ -530,6 +531,7 @@ config QEMU
select DMA_COHERENT
select GENERIC_ISA_DMA
select HAVE_STD_PC_SERIAL_PORT
+ select I8253
select I8259
select ISA
select SWAP_IO_SPACE
@@ -714,6 +716,7 @@ config SNI_RM200_PCI
select HAVE_STD_PC_SERIAL_PORT
select HW_HAS_EISA
select HW_HAS_PCI
+ select I8253
select I8259
select ISA
select SYS_HAS_CPU_R4X00
@@ -1721,6 +1724,9 @@ config MMU
bool
default y
+config I8253
+ bool
+
source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index f36c4f20ee8af..309d54cceda3c 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -59,6 +59,8 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_64BIT) += cpu-bugs64.o
+obj-$(CONFIG_I8253) += i8253.o
+
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
new file mode 100644
index 0000000000000..475df6904219b
--- /dev/null
+++ b/arch/mips/kernel/i8253.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2006 IBM Corporation
+ *
+ * Implements device information for i8253 timer chip
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation
+ */
+
+#include <linux/platform_device.h>
+
+static __init int add_pcspkr(void)
+{
+ struct platform_device *pd;
+ int ret;
+
+ pd = platform_device_alloc("pcspkr", -1);
+ if (!pd)
+ return -ENOMEM;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ platform_device_put(pd);
+
+ return ret;
+}
+device_initcall(add_pcspkr);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index a8f435d829403..c66db5e5ab624 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -419,4 +419,3 @@ unsigned long get_wchan(struct task_struct *p)
return pc;
}
-EXPORT_SYMBOL(get_wchan);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 211d72653ea68..764d073297165 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -61,7 +61,7 @@ static int __init parse_elfcorehdr(char *p)
if (p)
elfcorehdr_addr = memparse(p, &p);
- return 0;
+ return 1;
}
__setup("elfcorehdr=", parse_elfcorehdr);
#endif
@@ -71,7 +71,7 @@ static int __init parse_savemaxmem(char *p)
if (p)
saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
- return 0;
+ return 1;
}
__setup("savemaxmem=", parse_savemaxmem);
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 1b73508ecb2bd..2cbde865d4f57 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -37,7 +37,7 @@
#include <asm/prom.h>
#include <asm/vdso_datapage.h>
-#define MODULE_VERS "1.6"
+#define MODULE_VERS "1.7"
#define MODULE_NAME "lparcfg"
/* #define LPARCFG_DEBUG */
@@ -149,17 +149,17 @@ static void log_plpar_hcall_return(unsigned long rc, char *tag)
if (rc == 0) /* success, return */
return;
/* check for null tag ? */
- if (rc == H_Hardware)
+ if (rc == H_HARDWARE)
printk(KERN_INFO
"plpar-hcall (%s) failed with hardware fault\n", tag);
- else if (rc == H_Function)
+ else if (rc == H_FUNCTION)
printk(KERN_INFO
"plpar-hcall (%s) failed; function not allowed\n", tag);
- else if (rc == H_Authority)
+ else if (rc == H_AUTHORITY)
printk(KERN_INFO
- "plpar-hcall (%s) failed; not authorized to this function\n",
- tag);
- else if (rc == H_Parameter)
+ "plpar-hcall (%s) failed; not authorized to this"
+ " function\n", tag);
+ else if (rc == H_PARAMETER)
printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n",
tag);
else
@@ -209,7 +209,7 @@ static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs)
unsigned long dummy;
rc = plpar_hcall(H_PIC, 0, 0, 0, 0, pool_idle_time, num_procs, &dummy);
- if (rc != H_Authority)
+ if (rc != H_AUTHORITY)
log_plpar_hcall_return(rc, "H_PIC");
}
@@ -242,7 +242,7 @@ static void parse_system_parameter_string(struct seq_file *m)
{
int call_status;
- char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
+ unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
if (!local_buffer) {
printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
__FILE__, __FUNCTION__, __LINE__);
@@ -254,7 +254,8 @@ static void parse_system_parameter_string(struct seq_file *m)
call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
NULL,
SPLPAR_CHARACTERISTICS_TOKEN,
- __pa(rtas_data_buf));
+ __pa(rtas_data_buf),
+ RTAS_DATA_BUF_SIZE);
memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
spin_unlock(&rtas_data_buf_lock);
@@ -275,7 +276,7 @@ static void parse_system_parameter_string(struct seq_file *m)
#ifdef LPARCFG_DEBUG
printk(KERN_INFO "success calling get-system-parameter \n");
#endif
- splpar_strlen = local_buffer[0] * 16 + local_buffer[1];
+ splpar_strlen = local_buffer[0] * 256 + local_buffer[1];
local_buffer += 2; /* step over strlen value */
memset(workbuffer, 0, SPLPAR_MAXLENGTH);
@@ -529,13 +530,13 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr,
*new_weight_ptr);
- if (retval == H_Success || retval == H_Constrained) {
+ if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
retval = count;
- } else if (retval == H_Busy) {
+ } else if (retval == H_BUSY) {
retval = -EBUSY;
- } else if (retval == H_Hardware) {
+ } else if (retval == H_HARDWARE) {
retval = -EIO;
- } else if (retval == H_Parameter) {
+ } else if (retval == H_PARAMETER) {
retval = -EINVAL;
} else {
printk(KERN_WARNING "%s: received unknown hv return code %ld",
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 706090c99f47c..2dd47d2dd9980 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -834,7 +834,6 @@ unsigned long get_wchan(struct task_struct *p)
} while (count++ < 16);
return 0;
}
-EXPORT_SYMBOL(get_wchan);
static int kstack_depth_to_print = 64;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 06636c927a7ec..0112318213ab5 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -578,18 +578,18 @@ static void rtas_percpu_suspend_me(void *info)
* We use "waiting" to indicate our state. As long
* as it is >0, we are still trying to all join up.
* If it goes to 0, we have successfully joined up and
- * one thread got H_Continue. If any error happens,
+ * one thread got H_CONTINUE. If any error happens,
* we set it to <0.
*/
local_irq_save(flags);
do {
rc = plpar_hcall_norets(H_JOIN);
smp_rmb();
- } while (rc == H_Success && data->waiting > 0);
- if (rc == H_Success)
+ } while (rc == H_SUCCESS && data->waiting > 0);
+ if (rc == H_SUCCESS)
goto out;
- if (rc == H_Continue) {
+ if (rc == H_CONTINUE) {
data->waiting = 0;
data->args->args[data->args->nargs] =
rtas_call(ibm_suspend_me_token, 0, 1, NULL);
@@ -597,7 +597,7 @@ static void rtas_percpu_suspend_me(void *info)
plpar_hcall_norets(H_PROD,i);
} else {
data->waiting = -EBUSY;
- printk(KERN_ERR "Error on H_Join hypervisor call\n");
+ printk(KERN_ERR "Error on H_JOIN hypervisor call\n");
}
out:
@@ -624,7 +624,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
printk(KERN_ERR "Error doing global join\n");
/* Prod each CPU. This won't hurt, and will wake
- * anyone we successfully put to sleep with H_Join
+ * anyone we successfully put to sleep with H_JOIN.
*/
for_each_possible_cpu(i)
plpar_hcall_norets(H_PROD, i);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index c607f3b9ca174..1d93e73a70034 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -21,6 +21,7 @@
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
+#include <linux/platform_device.h>
#include <linux/ide.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
@@ -462,6 +463,29 @@ static int __init early_xmon(char *p)
early_param("xmon", early_xmon);
#endif
+static __init int add_pcspkr(void)
+{
+ struct device_node *np;
+ struct platform_device *pd;
+ int ret;
+
+ np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
+ of_node_put(np);
+ if (!np)
+ return -ENODEV;
+
+ pd = platform_device_alloc("pcspkr", -1);
+ if (!pd)
+ return -ENOMEM;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ platform_device_put(pd);
+
+ return ret;
+}
+device_initcall(add_pcspkr);
+
void probe_machine(void)
{
extern struct machdep_calls __machine_desc_start;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a72bf5dceeee5..69ac257013442 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -50,7 +50,6 @@
#include <asm/kgdb.h>
#endif
-extern void platform_init(void);
extern void bootx_init(unsigned long r4, unsigned long phys);
boot_infos_t *boot_infos;
@@ -138,12 +137,7 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
#endif /* CONFIG_CMDLINE */
-#ifdef CONFIG_PPC_MULTIPLATFORM
probe_machine();
-#else
- /* Base init based on machine type. Obsoloete, please kill ! */
- platform_init();
-#endif
#ifdef CONFIG_6xx
if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 59aa92cd6fa4e..13e91c4d70a8f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -215,12 +215,10 @@ void __init early_setup(unsigned long dt_ptr)
/*
* Initialize stab / SLB management except on iSeries
*/
- if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
- if (cpu_has_feature(CPU_FTR_SLB))
- slb_initialize();
- else
- stab_initialize(get_paca()->stab_real);
- }
+ if (cpu_has_feature(CPU_FTR_SLB))
+ slb_initialize();
+ else if (!firmware_has_feature(FW_FEATURE_ISERIES))
+ stab_initialize(get_paca()->stab_real);
DBG(" <- early_setup()\n");
}
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 1ad55f0466fd6..1424eab450ee9 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -322,3 +322,4 @@ SYSCALL(spu_create)
COMPAT_SYS(pselect6)
COMPAT_SYS(ppoll)
SYSCALL(unshare)
+SYSCALL(splice)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4cbde211eb699..064a525646922 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -228,7 +228,7 @@ void system_reset_exception(struct pt_regs *regs)
*/
static inline int check_io_access(struct pt_regs *regs)
{
-#ifdef CONFIG_PPC_PMAC
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
unsigned long msr = regs->msr;
const struct exception_table_entry *entry;
unsigned int *nip = (unsigned int *)regs->nip;
@@ -261,7 +261,7 @@ static inline int check_io_access(struct pt_regs *regs)
return 1;
}
}
-#endif /* CONFIG_PPC_PMAC */
+#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */
return 0;
}
@@ -308,8 +308,8 @@ platform_machine_check(struct pt_regs *regs)
void machine_check_exception(struct pt_regs *regs)
{
-#ifdef CONFIG_PPC64
int recover = 0;
+ unsigned long reason = get_mc_reason(regs);
/* See if any machine dependent calls */
if (ppc_md.machine_check_exception)
@@ -317,8 +317,6 @@ void machine_check_exception(struct pt_regs *regs)
if (recover)
return;
-#else
- unsigned long reason = get_mc_reason(regs);
if (user_mode(regs)) {
regs->msr |= MSR_RI;
@@ -462,7 +460,6 @@ void machine_check_exception(struct pt_regs *regs)
* additional info, e.g. bus error registers.
*/
platform_machine_check(regs);
-#endif /* CONFIG_PPC64 */
if (debugger_fault_handler(regs))
return;
diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S
index e04642781917b..0c6a37b29dde0 100644
--- a/arch/powerpc/kernel/vdso32/sigtramp.S
+++ b/arch/powerpc/kernel/vdso32/sigtramp.S
@@ -261,7 +261,7 @@ V_FUNCTION_END(__kernel_sigtramp_rt32)
.Lcie_start:
.long 0 /* CIE ID */
.byte 1 /* Version number */
- .string "zR" /* NUL-terminated augmentation string */
+ .string "zRS" /* NUL-terminated augmentation string */
.uleb128 4 /* Code alignment factor */
.sleb128 -4 /* Data alignment factor */
.byte 67 /* Return address register column, ap */
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S
index 31b604ab56dea..7479edb101b8c 100644
--- a/arch/powerpc/kernel/vdso64/sigtramp.S
+++ b/arch/powerpc/kernel/vdso64/sigtramp.S
@@ -263,7 +263,7 @@ V_FUNCTION_END(__kernel_sigtramp_rt64)
.Lcie_start:
.long 0 /* CIE ID */
.byte 1 /* Version number */
- .string "zR" /* NUL-terminated augmentation string */
+ .string "zRS" /* NUL-terminated augmentation string */
.uleb128 4 /* Code alignment factor */
.sleb128 -8 /* Data alignment factor */
.byte 67 /* Return address register column, ap */
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5aea0909a5ec9..fdbba4206d59e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -177,15 +177,15 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunatly, in the case of an
- * erroneous fault occuring in a code path which already holds mmap_sem
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
+ * erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibilty of a deadlock.
+ * the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
diff --git a/arch/powerpc/platforms/83xx/mpc834x_sys.c b/arch/powerpc/platforms/83xx/mpc834x_sys.c
index 7c18b4cd5db43..7e789d2420ba4 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_sys.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_sys.c
@@ -158,25 +158,25 @@ static int __init mpc834x_rtc_hookup(void)
late_initcall(mpc834x_rtc_hookup);
#endif
-void __init platform_init(void)
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init mpc834x_sys_probe(void)
{
- /* setup the PowerPC module struct */
- ppc_md.setup_arch = mpc834x_sys_setup_arch;
-
- ppc_md.init_IRQ = mpc834x_sys_init_IRQ;
- ppc_md.get_irq = ipic_get_irq;
-
- ppc_md.restart = mpc83xx_restart;
-
- ppc_md.time_init = mpc83xx_time_init;
- ppc_md.set_rtc_time = NULL;
- ppc_md.get_rtc_time = NULL;
- ppc_md.calibrate_decr = generic_calibrate_decr;
-
- ppc_md.progress = udbg_progress;
-
- if (ppc_md.progress)
- ppc_md.progress("mpc834x_sys_init(): exit", 0);
-
- return;
+ /* We always match for now, eventually we should look at the flat
+ dev tree to ensure this is the board we are supposed to run on
+ */
+ return 1;
}
+
+define_machine(mpc834x_sys) {
+ .name = "MPC834x SYS",
+ .probe = mpc834x_sys_probe,
+ .setup_arch = mpc834x_sys_setup_arch,
+ .init_IRQ = mpc834x_sys_init_IRQ,
+ .get_irq = ipic_get_irq,
+ .restart = mpc83xx_restart,
+ .time_init = mpc83xx_time_init,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index b7821dbae00d6..5eeff370f5fca 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -220,25 +220,25 @@ void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
}
-void __init platform_init(void)
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init mpc85xx_ads_probe(void)
{
- ppc_md.setup_arch = mpc85xx_ads_setup_arch;
- ppc_md.show_cpuinfo = mpc85xx_ads_show_cpuinfo;
-
- ppc_md.init_IRQ = mpc85xx_ads_pic_init;
- ppc_md.get_irq = mpic_get_irq;
-
- ppc_md.restart = mpc85xx_restart;
- ppc_md.power_off = NULL;
- ppc_md.halt = NULL;
-
- ppc_md.time_init = NULL;
- ppc_md.set_rtc_time = NULL;
- ppc_md.get_rtc_time = NULL;
- ppc_md.calibrate_decr = generic_calibrate_decr;
-
- ppc_md.progress = udbg_progress;
-
- if (ppc_md.progress)
- ppc_md.progress("mpc85xx_ads platform_init(): exit", 0);
+ /* We always match for now, eventually we should look at the flat
+ dev tree to ensure this is the board we are supposed to run on
+ */
+ return 1;
}
+
+define_machine(mpc85xx_ads) {
+ .name = "MPC85xx ADS",
+ .probe = mpc85xx_ads_probe,
+ .setup_arch = mpc85xx_ads_setup_arch,
+ .init_IRQ = mpc85xx_ads_pic_init,
+ .show_cpuinfo = mpc85xx_ads_show_cpuinfo,
+ .get_irq = mpic_get_irq,
+ .restart = mpc85xx_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index 3a4245c926ade..6594bec738824 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -316,6 +316,7 @@ void *spu_syscall_table[] = {
[__NR_pselect6] sys_ni_syscall, /* sys_pselect */
[__NR_ppoll] sys_ni_syscall, /* sys_ppoll */
[__NR_unshare] sys_unshare,
+ [__NR_splice] sys_splice,
};
long spu_sys_callback(struct spu_syscall_block *s)
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index c04e078c0fe57..483c8b76232c0 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -2,6 +2,7 @@
#include <linux/ptrace.h>
#include <asm/spu.h>
+#include <asm/unistd.h>
#include "spufs.h"
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 9b2b1cb117b37..780fb27a0099e 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -865,7 +865,7 @@ void __init eeh_init(void)
* on the CEC architecture, type of the device, on earlier boot
* command-line arguments & etc.
*/
-void eeh_add_device_early(struct device_node *dn)
+static void eeh_add_device_early(struct device_node *dn)
{
struct pci_controller *phb;
struct eeh_early_enable_info info;
@@ -882,7 +882,6 @@ void eeh_add_device_early(struct device_node *dn)
info.buid_lo = BUID_LO(phb->buid);
early_enable_eeh(dn, &info);
}
-EXPORT_SYMBOL_GPL(eeh_add_device_early);
void eeh_add_device_tree_early(struct device_node *dn)
{
@@ -893,20 +892,6 @@ void eeh_add_device_tree_early(struct device_node *dn)
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
-void eeh_add_device_tree_late(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- eeh_add_device_late(dev);
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- struct pci_bus *subbus = dev->subordinate;
- if (subbus)
- eeh_add_device_tree_late(subbus);
- }
- }
-}
-
/**
* eeh_add_device_late - perform EEH initialization for the indicated pci device
* @dev: pci device for which to set up EEH
@@ -914,7 +899,7 @@ void eeh_add_device_tree_late(struct pci_bus *bus)
* This routine must be used to complete EEH initialization for PCI
* devices that were added after system boot (e.g. hotplug, dlpar).
*/
-void eeh_add_device_late(struct pci_dev *dev)
+static void eeh_add_device_late(struct pci_dev *dev)
{
struct device_node *dn;
struct pci_dn *pdn;
@@ -933,16 +918,33 @@ void eeh_add_device_late(struct pci_dev *dev)
pci_addr_cache_insert_device (dev);
}
-EXPORT_SYMBOL_GPL(eeh_add_device_late);
+
+void eeh_add_device_tree_late(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ eeh_add_device_late(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ struct pci_bus *subbus = dev->subordinate;
+ if (subbus)
+ eeh_add_device_tree_late(subbus);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
/**
* eeh_remove_device - undo EEH setup for the indicated pci device
* @dev: pci device to be removed
*
- * This routine should be when a device is removed from a running
- * system (e.g. by hotplug or dlpar).
+ * This routine should be called when a device is removed from
+ * a running system (e.g. by hotplug or dlpar). It unregisters
+ * the PCI device from the EEH subsystem. I/O errors affecting
+ * this device will no longer be detected after this call; thus,
+ * i/o errors affecting this slot may leave this device unusable.
*/
-void eeh_remove_device(struct pci_dev *dev)
+static void eeh_remove_device(struct pci_dev *dev)
{
struct device_node *dn;
if (!dev || !eeh_subsystem_enabled)
@@ -958,21 +960,17 @@ void eeh_remove_device(struct pci_dev *dev)
PCI_DN(dn)->pcidev = NULL;
pci_dev_put (dev);
}
-EXPORT_SYMBOL_GPL(eeh_remove_device);
void eeh_remove_bus_device(struct pci_dev *dev)
{
+ struct pci_bus *bus = dev->subordinate;
+ struct pci_dev *child, *tmp;
+
eeh_remove_device(dev);
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- struct pci_bus *bus = dev->subordinate;
- struct list_head *ln;
- if (!bus)
- return;
- for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
- struct pci_dev *pdev = pci_dev_b(ln);
- if (pdev)
- eeh_remove_bus_device(pdev);
- }
+
+ if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
+ eeh_remove_bus_device(child);
}
}
EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index cc2495a0cdd58..1fba695e32e82 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -293,15 +293,16 @@ void handle_eeh_events (struct eeh_event *event)
frozen_pdn = PCI_DN(frozen_dn);
frozen_pdn->eeh_freeze_count++;
- pci_str = pci_name (frozen_pdn->pcidev);
- drv_str = pcid_name (frozen_pdn->pcidev);
- if (!pci_str) {
+ if (frozen_pdn->pcidev) {
+ pci_str = pci_name (frozen_pdn->pcidev);
+ drv_str = pcid_name (frozen_pdn->pcidev);
+ } else {
pci_str = pci_name (event->dev);
drv_str = pcid_name (event->dev);
}
if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES)
- goto hard_fail;
+ goto excess_failures;
/* If the reset state is a '5' and the time to reset is 0 (infinity)
* or is more then 15 seconds, then mark this as a permanent failure.
@@ -356,7 +357,7 @@ void handle_eeh_events (struct eeh_event *event)
return;
-hard_fail:
+excess_failures:
/*
* About 90% of all real-life EEH failures in the field
* are due to poorly seated PCI cards. Only 10% or so are
@@ -367,7 +368,15 @@ hard_fail:
"and has been permanently disabled. Please try reseating\n"
"this device or replacing it.\n",
drv_str, pci_str, frozen_pdn->eeh_freeze_count);
+ goto perm_error;
+
+hard_fail:
+ printk(KERN_ERR
+ "EEH: Unable to recover from failure of PCI device %s - %s\n"
+ "Please try reseating this device or replacing it.\n",
+ drv_str, pci_str);
+perm_error:
eeh_slot_error_detail(frozen_pdn, 2 /* Permanent Error */);
/* Notify all devices that they're about to go down. */
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 9a9961f27480d..a1bda6f96fd1f 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -19,7 +19,9 @@
*/
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/workqueue.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
@@ -37,14 +39,18 @@ LIST_HEAD(eeh_eventlist);
static void eeh_thread_launcher(void *);
DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+/* Serialize reset sequences for a given pci device */
+DEFINE_MUTEX(eeh_event_mutex);
+
/**
- * eeh_event_handler - dispatch EEH events. The detection of a frozen
- * slot can occur inside an interrupt, where it can be hard to do
- * anything about it. The goal of this routine is to pull these
- * detection events out of the context of the interrupt handler, and
- * re-dispatch them for processing at a later time in a normal context.
- *
+ * eeh_event_handler - dispatch EEH events.
* @dummy - unused
+ *
+ * The detection of a frozen slot can occur inside an interrupt,
+ * where it can be hard to do anything about it. The goal of this
+ * routine is to pull these detection events out of the context
+ * of the interrupt handler, and re-dispatch them for processing
+ * at a later time in a normal context.
*/
static int eeh_event_handler(void * dummy)
{
@@ -64,23 +70,24 @@ static int eeh_event_handler(void * dummy)
event = list_entry(eeh_eventlist.next, struct eeh_event, list);
list_del(&event->list);
}
-
- if (event)
- eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);
-
spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
+
if (event == NULL)
break;
+ /* Serialize processing of EEH events */
+ mutex_lock(&eeh_event_mutex);
+ eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);
+
printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
pci_name(event->dev));
handle_eeh_events(event);
eeh_clear_slot(event->dn, EEH_MODE_RECOVERING);
-
pci_dev_put(event->dev);
kfree(event);
+ mutex_unlock(&eeh_event_mutex);
}
return 0;
@@ -88,7 +95,6 @@ static int eeh_event_handler(void * dummy)
/**
* eeh_thread_launcher
- *
* @dummy - unused
*/
static void eeh_thread_launcher(void *dummy)
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index db7c19fe92972..c9ff547f9d251 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -127,3 +127,103 @@ _GLOBAL(plpar_hcall_4out)
mtcrf 0xff,r0
blr /* return r3 = status */
+
+/* plpar_hcall_7arg_7ret(unsigned long opcode, R3
+ unsigned long arg1, R4
+ unsigned long arg2, R5
+ unsigned long arg3, R6
+ unsigned long arg4, R7
+ unsigned long arg5, R8
+ unsigned long arg6, R9
+ unsigned long arg7, R10
+ unsigned long *out1, 112(R1)
+ unsigned long *out2, 110(R1)
+ unsigned long *out3, 108(R1)
+ unsigned long *out4, 106(R1)
+ unsigned long *out5, 104(R1)
+ unsigned long *out6, 102(R1)
+ unsigned long *out7); 100(R1)
+*/
+_GLOBAL(plpar_hcall_7arg_7ret)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ HVSC /* invoke the hypervisor */
+
+ lwz r0,8(r1)
+
+ ld r11,STK_PARM(r11)(r1) /* Fetch r4 ret arg */
+ std r4,0(r11)
+ ld r11,STK_PARM(r12)(r1) /* Fetch r5 ret arg */
+ std r5,0(r11)
+ ld r11,STK_PARM(r13)(r1) /* Fetch r6 ret arg */
+ std r6,0(r11)
+ ld r11,STK_PARM(r14)(r1) /* Fetch r7 ret arg */
+ std r7,0(r11)
+ ld r11,STK_PARM(r15)(r1) /* Fetch r8 ret arg */
+ std r8,0(r11)
+ ld r11,STK_PARM(r16)(r1) /* Fetch r9 ret arg */
+ std r9,0(r11)
+ ld r11,STK_PARM(r17)(r1) /* Fetch r10 ret arg */
+ std r10,0(r11)
+
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
+
+/* plpar_hcall_9arg_9ret(unsigned long opcode, R3
+ unsigned long arg1, R4
+ unsigned long arg2, R5
+ unsigned long arg3, R6
+ unsigned long arg4, R7
+ unsigned long arg5, R8
+ unsigned long arg6, R9
+ unsigned long arg7, R10
+ unsigned long arg8, 112(R1)
+ unsigned long arg9, 110(R1)
+ unsigned long *out1, 108(R1)
+ unsigned long *out2, 106(R1)
+ unsigned long *out3, 104(R1)
+ unsigned long *out4, 102(R1)
+ unsigned long *out5, 100(R1)
+ unsigned long *out6, 98(R1)
+ unsigned long *out7); 96(R1)
+ unsigned long *out8, 94(R1)
+ unsigned long *out9, 92(R1)
+*/
+_GLOBAL(plpar_hcall_9arg_9ret)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ ld r11,STK_PARM(r11)(r1) /* put arg8 in R11 */
+ ld r12,STK_PARM(r12)(r1) /* put arg9 in R12 */
+
+ HVSC /* invoke the hypervisor */
+
+ ld r0,STK_PARM(r13)(r1) /* Fetch r4 ret arg */
+ stdx r4,r0,r0
+ ld r0,STK_PARM(r14)(r1) /* Fetch r5 ret arg */
+ stdx r5,r0,r0
+ ld r0,STK_PARM(r15)(r1) /* Fetch r6 ret arg */
+ stdx r6,r0,r0
+ ld r0,STK_PARM(r16)(r1) /* Fetch r7 ret arg */
+ stdx r7,r0,r0
+ ld r0,STK_PARM(r17)(r1) /* Fetch r8 ret arg */
+ stdx r8,r0,r0
+ ld r0,STK_PARM(r18)(r1) /* Fetch r9 ret arg */
+ stdx r9,r0,r0
+ ld r0,STK_PARM(r19)(r1) /* Fetch r10 ret arg */
+ stdx r10,r0,r0
+ ld r0,STK_PARM(r20)(r1) /* Fetch r11 ret arg */
+ stdx r11,r0,r0
+ ld r0,STK_PARM(r21)(r1) /* Fetch r12 ret arg */
+ stdx r12,r0,r0
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
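The register and stack annotations above imply C declarations along the following lines (a sketch only; the authoritative prototypes live in the hvcall header):

long plpar_hcall_7arg_7ret(unsigned long opcode,
			   unsigned long arg1, unsigned long arg2,
			   unsigned long arg3, unsigned long arg4,
			   unsigned long arg5, unsigned long arg6,
			   unsigned long arg7,
			   unsigned long *out1, unsigned long *out2,
			   unsigned long *out3, unsigned long *out4,
			   unsigned long *out5, unsigned long *out6,
			   unsigned long *out7);

long plpar_hcall_9arg_9ret(unsigned long opcode,
			   unsigned long arg1, unsigned long arg2,
			   unsigned long arg3, unsigned long arg4,
			   unsigned long arg5, unsigned long arg6,
			   unsigned long arg7, unsigned long arg8,
			   unsigned long arg9,
			   unsigned long *out1, unsigned long *out2,
			   unsigned long *out3, unsigned long *out4,
			   unsigned long *out5, unsigned long *out6,
			   unsigned long *out7, unsigned long *out8,
			   unsigned long *out9);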
diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
index ba6befd96636e..a72a987f1d4d4 100644
--- a/arch/powerpc/platforms/pseries/hvconsole.c
+++ b/arch/powerpc/platforms/pseries/hvconsole.c
@@ -41,7 +41,7 @@ int hvc_get_chars(uint32_t vtermno, char *buf, int count)
unsigned long got;
if (plpar_hcall(H_GET_TERM_CHAR, vtermno, 0, 0, 0, &got,
- (unsigned long *)buf, (unsigned long *)buf+1) == H_Success)
+ (unsigned long *)buf, (unsigned long *)buf+1) == H_SUCCESS)
return got;
return 0;
}
@@ -69,9 +69,9 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count, lbuf[0],
lbuf[1]);
- if (ret == H_Success)
+ if (ret == H_SUCCESS)
return count;
- if (ret == H_Busy)
+ if (ret == H_BUSY)
return 0;
return -EIO;
}
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
index 22bfb5c89db9f..fcf4b4cbeaf33 100644
--- a/arch/powerpc/platforms/pseries/hvcserver.c
+++ b/arch/powerpc/platforms/pseries/hvcserver.c
@@ -43,21 +43,21 @@ MODULE_VERSION(HVCS_ARCH_VERSION);
static int hvcs_convert(long to_convert)
{
switch (to_convert) {
- case H_Success:
+ case H_SUCCESS:
return 0;
- case H_Parameter:
+ case H_PARAMETER:
return -EINVAL;
- case H_Hardware:
+ case H_HARDWARE:
return -EIO;
- case H_Busy:
- case H_LongBusyOrder1msec:
- case H_LongBusyOrder10msec:
- case H_LongBusyOrder100msec:
- case H_LongBusyOrder1sec:
- case H_LongBusyOrder10sec:
- case H_LongBusyOrder100sec:
+ case H_BUSY:
+ case H_LONG_BUSY_ORDER_1_MSEC:
+ case H_LONG_BUSY_ORDER_10_MSEC:
+ case H_LONG_BUSY_ORDER_100_MSEC:
+ case H_LONG_BUSY_ORDER_1_SEC:
+ case H_LONG_BUSY_ORDER_10_SEC:
+ case H_LONG_BUSY_ORDER_100_SEC:
return -EBUSY;
- case H_Function: /* fall through */
+ case H_FUNCTION: /* fall through */
default:
return -EPERM;
}
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8952528d31ac6..634b7d06d3cc2 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -54,7 +54,8 @@ EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall_4out);
EXPORT_SYMBOL(plpar_hcall_norets);
EXPORT_SYMBOL(plpar_hcall_8arg_2ret);
-
+EXPORT_SYMBOL(plpar_hcall_7arg_7ret);
+EXPORT_SYMBOL(plpar_hcall_9arg_9ret);
extern void pSeries_find_serial_port(void);
@@ -72,7 +73,7 @@ static void udbg_hvsi_putc(char c)
do {
rc = plpar_put_term_char(vtermno, sizeof(packet), packet);
- } while (rc == H_Busy);
+ } while (rc == H_BUSY);
}
static long hvsi_udbg_buf_len;
@@ -85,7 +86,7 @@ static int udbg_hvsi_getc_poll(void)
if (hvsi_udbg_buf_len == 0) {
rc = plpar_get_term_char(vtermno, &hvsi_udbg_buf_len, hvsi_udbg_buf);
- if (rc != H_Success || hvsi_udbg_buf[0] != 0xff) {
+ if (rc != H_SUCCESS || hvsi_udbg_buf[0] != 0xff) {
/* bad read or non-data packet */
hvsi_udbg_buf_len = 0;
} else {
@@ -139,7 +140,7 @@ static void udbg_putcLP(char c)
buf[0] = c;
do {
rc = plpar_put_term_char(vtermno, 1, buf);
- } while(rc == H_Busy);
+ } while(rc == H_BUSY);
}
/* Buffered chars getc */
@@ -158,7 +159,7 @@ static int udbg_getc_pollLP(void)
/* get some more chars. */
inbuflen = 0;
rc = plpar_get_term_char(vtermno, &inbuflen, buf);
- if (rc != H_Success)
+ if (rc != H_SUCCESS)
inbuflen = 0; /* otherwise inbuflen is garbage */
}
if (inbuflen <= 0 || inbuflen > 16) {
@@ -304,7 +305,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v,
hpte_r, &slot, &dummy0, &dummy1);
- if (unlikely(lpar_rc == H_PTEG_Full)) {
+ if (unlikely(lpar_rc == H_PTEG_FULL)) {
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" full\n");
return -1;
@@ -315,7 +316,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
* will fail. However we must catch the failure in hash_page
* or we will loop forever, so return -2 in this case.
*/
- if (unlikely(lpar_rc != H_Success)) {
+ if (unlikely(lpar_rc != H_SUCCESS)) {
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" lpar err %d\n", lpar_rc);
return -2;
@@ -346,9 +347,9 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
/* don't remove a bolted entry */
lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
(0x1UL << 4), &dummy1, &dummy2);
- if (lpar_rc == H_Success)
+ if (lpar_rc == H_SUCCESS)
return i;
- BUG_ON(lpar_rc != H_Not_Found);
+ BUG_ON(lpar_rc != H_NOT_FOUND);
slot_offset++;
slot_offset &= 0x7;
@@ -391,14 +392,14 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
lpar_rc = plpar_pte_protect(flags, slot, want_v & HPTE_V_AVPN);
- if (lpar_rc == H_Not_Found) {
+ if (lpar_rc == H_NOT_FOUND) {
DBG_LOW("not found !\n");
return -1;
}
DBG_LOW("ok\n");
- BUG_ON(lpar_rc != H_Success);
+ BUG_ON(lpar_rc != H_SUCCESS);
return 0;
}
@@ -417,7 +418,7 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);
- BUG_ON(lpar_rc != H_Success);
+ BUG_ON(lpar_rc != H_SUCCESS);
return dword0;
}
@@ -468,7 +469,7 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
flags = newpp & 7;
lpar_rc = plpar_pte_protect(flags, slot, 0);
- BUG_ON(lpar_rc != H_Success);
+ BUG_ON(lpar_rc != H_SUCCESS);
}
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
@@ -484,10 +485,10 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
want_v = hpte_encode_v(va, psize);
lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v & HPTE_V_AVPN,
&dummy1, &dummy2);
- if (lpar_rc == H_Not_Found)
+ if (lpar_rc == H_NOT_FOUND)
return;
- BUG_ON(lpar_rc != H_Success);
+ BUG_ON(lpar_rc != H_SUCCESS);
}
/*
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index b2fbf8ba8fbb7..5eb55ef1c91cd 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -463,7 +463,7 @@ static void pseries_dedicated_idle_sleep(void)
* very low priority. The cede enables interrupts, which
* doesn't matter here.
*/
- if (!lppaca[cpu ^ 1].idle || poll_pending() == H_Pending)
+ if (!lppaca[cpu ^ 1].idle || poll_pending() == H_PENDING)
cede_processor();
out:
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 866379b80c090..8e53e04ada8ba 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -258,7 +258,7 @@ EXPORT_SYMBOL(vio_find_node);
int vio_enable_interrupts(struct vio_dev *dev)
{
int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
- if (rc != H_Success)
+ if (rc != H_SUCCESS)
printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
return rc;
}
@@ -267,7 +267,7 @@ EXPORT_SYMBOL(vio_enable_interrupts);
int vio_disable_interrupts(struct vio_dev *dev)
{
int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
- if (rc != H_Success)
+ if (rc != H_SUCCESS)
printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
return rc;
}
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 4864cb32be250..2d60ea30fed6a 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -168,7 +168,7 @@ static int pSeriesLP_xirr_info_get(int n_cpu)
unsigned long return_value;
lpar_rc = plpar_xirr(&return_value);
- if (lpar_rc != H_Success)
+ if (lpar_rc != H_SUCCESS)
panic(" bad return code xirr - rc = %lx \n", lpar_rc);
return (int)return_value;
}
@@ -179,7 +179,7 @@ static void pSeriesLP_xirr_info_set(int n_cpu, int value)
unsigned long val64 = value & 0xffffffff;
lpar_rc = plpar_eoi(val64);
- if (lpar_rc != H_Success)
+ if (lpar_rc != H_SUCCESS)
panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
val64);
}
@@ -189,7 +189,7 @@ void pSeriesLP_cppr_info(int n_cpu, u8 value)
unsigned long lpar_rc;
lpar_rc = plpar_cppr(value);
- if (lpar_rc != H_Success)
+ if (lpar_rc != H_SUCCESS)
panic("bad return code cppr - rc = %lx\n", lpar_rc);
}
@@ -198,7 +198,7 @@ static void pSeriesLP_qirr_info(int n_cpu , u8 value)
unsigned long lpar_rc;
lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
- if (lpar_rc != H_Success)
+ if (lpar_rc != H_SUCCESS)
panic("bad return code qirr - rc = %lx\n", lpar_rc);
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2b8841f85534a..343120c9223d7 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -801,7 +801,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
*/
print_cpu_info(&S390_lowcore.cpu_data);
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
lowcore_ptr[i] = (struct _lowcore *)
__get_free_pages(GFP_KERNEL|GFP_DMA,
sizeof(void*) == 8 ? 1 : 0);
@@ -831,7 +831,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#endif
set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
- for_each_cpu(cpu)
+ for_each_possible_cpu(cpu)
if (cpu != smp_processor_id())
smp_create_idle(cpu);
}
@@ -868,7 +868,7 @@ static int __init topology_init(void)
int cpu;
int ret;
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index cf94e8ef17c5e..868e68b288809 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -30,7 +30,7 @@ static int x##_disabled __initdata = 0; \
static int __init x##_setup(char *opts) \
{ \
x##_disabled = 1; \
- return 0; \
+ return 1; \
} \
__setup("no" __stringify(x), x##_setup);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 7ee4ca203616f..bb229ef030f3c 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -401,7 +401,7 @@ static int __init topology_init(void)
{
int cpu_id;
- for_each_cpu(cpu_id)
+ for_each_possible_cpu(cpu_id)
register_cpu(&cpu[cpu_id], cpu_id, NULL);
return 0;
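The for_each_cpu() to for_each_possible_cpu() conversions in the s390 and sh hunks above are a naming change that spells out the iteration set (every CPU that could ever come online); behaviour is unchanged. A minimal sketch of the typical use:

#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

static int __init count_possible_cpus(void)
{
	int cpu, n = 0;

	/* Count CPUs that may be hot-plugged later, not only those online now. */
	for_each_possible_cpu(cpu)
		n++;

	printk(KERN_INFO "possible cpus: %d\n", n);
	return 0;
}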
diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S
index 768de64b371fd..fbbec5e761c68 100644
--- a/arch/sparc/kernel/systbls.S
+++ b/arch/sparc/kernel/systbls.S
@@ -64,13 +64,13 @@ sys_call_table:
/*215*/ .long sys_ipc, sys_sigreturn, sys_clone, sys_ioprio_get, sys_adjtimex
/*220*/ .long sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
/*225*/ .long sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16
-/*230*/ .long sys_select, sys_time, sys_nis_syscall, sys_stime, sys_statfs64
+/*230*/ .long sys_select, sys_time, sys_splice, sys_stime, sys_statfs64
/* "We are the Knights of the Forest of Ni!!" */
/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
-/*255*/ .long sys_nis_syscall, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
+/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
/*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 900fb0b940d80..30389085a3591 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.16
-# Sun Mar 26 14:58:11 2006
+# Fri Mar 31 01:40:57 2006
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
@@ -180,6 +180,7 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
+CONFIG_INET_XFRM_TUNNEL=y
CONFIG_INET_TUNNEL=y
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
@@ -203,6 +204,7 @@ CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_XFRM_TUNNEL=m
CONFIG_INET6_TUNNEL=m
CONFIG_IPV6_TUNNEL=m
# CONFIG_NETFILTER is not set
@@ -308,7 +310,6 @@ CONFIG_BLK_DEV_NBD=m
# CONFIG_BLK_DEV_SX8 is not set
CONFIG_BLK_DEV_UB=m
# CONFIG_BLK_DEV_RAM is not set
-CONFIG_BLK_DEV_RAM_COUNT=16
# CONFIG_BLK_DEV_INITRD is not set
CONFIG_CDROM_PKTCDVD=m
CONFIG_CDROM_PKTCDVD_BUFFERS=8
@@ -449,6 +450,7 @@ CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID5=m
+# CONFIG_MD_RAID5_RESHAPE is not set
CONFIG_MD_RAID6=m
CONFIG_MD_MULTIPATH=m
# CONFIG_MD_FAULTY is not set
@@ -741,9 +743,7 @@ CONFIG_I2C_ALGOBIT=y
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_SENSORS_RTC8564 is not set
# CONFIG_SENSORS_MAX6875 is not set
-# CONFIG_RTC_X1205_I2C is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
@@ -826,6 +826,7 @@ CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_FIRMWARE_EDID is not set
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
# CONFIG_FB_CIRRUS is not set
@@ -1117,6 +1118,11 @@ CONFIG_USB_HIDDEV=y
#
#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
# Misc Linux/SPARC drivers
#
CONFIG_SUN_OPENPROMIO=m
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 7dc28a4842686..8175a6968c6bc 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -830,9 +830,16 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
static void tsb_sync(void *info)
{
+ struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
struct mm_struct *mm = info;
- if (current->active_mm == mm)
+ /* It is not valid to test "current->active_mm == mm" here.
+ *
+ * The value of "current" is not changed atomically with
+ * switch_mm(). But that's OK, we just need to check the
+ * current cpu's trap block PGD physical address.
+ */
+ if (tp->pgd_paddr == __pa(mm->pgd))
tsb_context_switch(mm);
}
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
index c4a1cef4b1e54..86dd5cb81e09f 100644
--- a/arch/sparc64/kernel/sys32.S
+++ b/arch/sparc64/kernel/sys32.S
@@ -136,6 +136,8 @@ SIGN1(sys32_getpeername, sys_getpeername, %o0)
SIGN1(sys32_getsockname, sys_getsockname, %o0)
SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1)
SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2)
+SIGN2(sys32_splice, sys_splice, %o0, %o1)
+SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5)
.globl sys32_mmap2
sys32_mmap2:
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index 2e906bad56faa..31030bf00f1a7 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -1069,3 +1069,11 @@ long sys32_lookup_dcookie(unsigned long cookie_high,
return sys_lookup_dcookie((cookie_high << 32) | cookie_low,
buf, len);
}
+
+long compat_sync_file_range(int fd, unsigned long off_high, unsigned long off_low, unsigned long nb_high, unsigned long nb_low, int flags)
+{
+ return sys_sync_file_range(fd,
+ (off_high << 32) | off_low,
+ (nb_high << 32) | nb_low,
+ flags);
+}
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index 3b250f2318fda..857b82c82875b 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -66,12 +66,12 @@ sys_call_table32:
.word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex
/*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid
.word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16
-/*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64
+/*230*/ .word sys32_select, compat_sys_time, sys32_splice, compat_sys_stime, compat_sys_statfs64
.word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
/*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
.word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
/*250*/ .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
- .word sys_ni_syscall, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
+ .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
/*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
@@ -135,12 +135,12 @@ sys_call_table:
.word sys_ipc, sys_nis_syscall, sys_clone, sys_ioprio_get, sys_adjtimex
/*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
.word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
-/*230*/ .word sys_select, sys_nis_syscall, sys_nis_syscall, sys_stime, sys_statfs64
+/*230*/ .word sys_select, sys_nis_syscall, sys_splice, sys_stime, sys_statfs64
.word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
.word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
/*250*/ .word sys64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
- .word sys_ni_syscall, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
+ .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
/*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
.word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 0db2f7d9fab50..6e002aacb9618 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -327,8 +327,12 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
insn = get_fault_insn(regs, 0);
if (!insn)
goto continue_fault;
+ /* All loads, stores and atomics have bits 30 and 31 both set
+ * in the instruction. Bit 21 is set in all stores, but we
+ * have to avoid prefetches which also have bit 21 set.
+ */
if ((insn & 0xc0200000) == 0xc0200000 &&
- (insn & 0x1780000) != 0x1680000) {
+ (insn & 0x01780000) != 0x01680000) {
/* Don't bother updating thread struct value,
* because update_mmu_cache only cares which tlb
* the access came from.
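/* Minimal sketch (not from the patch) restating the mask test above: bits
 * 30 and 31 are set for all loads, stores and atomics, and bit 21 is set
 * for stores, but prefetches also set bit 21 and are matched (and
 * excluded) by the 0x01680000 pattern.
 */
#include <stdbool.h>
#include <stdint.h>

static bool insn_is_store_not_prefetch(uint32_t insn)
{
	return (insn & 0xc0200000u) == 0xc0200000u &&
	       (insn & 0x01780000u) != 0x01680000u;
}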
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 074620d413d47..fbbbebbad8a49 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -198,6 +198,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
pmd_t *pmd;
pte_t *pte = NULL;
+ /* We must align the address, because our caller will run
+ * set_huge_pte_at() on whatever we return, which writes out
+ * all of the sub-ptes for the hugepage range. So we have
+ * to give it the first such sub-pte.
+ */
+ addr &= HPAGE_MASK;
+
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
if (pud) {
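/* Worked example (values assumed, not taken from the patch): with a 4MB
 * hugepage, HPAGE_SHIFT would be 22, so HPAGE_MASK clears the low 22 bits
 * and any address inside the hugepage rounds down to its first sub-pte.
 */
#define EXAMPLE_HPAGE_SHIFT	22
#define EXAMPLE_HPAGE_SIZE	(1UL << EXAMPLE_HPAGE_SHIFT)
#define EXAMPLE_HPAGE_MASK	(~(EXAMPLE_HPAGE_SIZE - 1))
/* 0x40123456UL & EXAMPLE_HPAGE_MASK == 0x40000000UL */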
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 5982fe2753e02..05fbb20636cbe 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -22,6 +22,9 @@ config SBUS
config PCI
bool
+config PCMCIA
+ bool
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 8d14c7a831bed..24790bed2054d 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -20,7 +20,7 @@ core-y += $(ARCH_DIR)/kernel/ \
# Have to precede the include because the included Makefiles reference them.
SYMLINK_HEADERS := archparam.h system.h sigcontext.h processor.h ptrace.h \
- module.h vm-flags.h elf.h ldt.h
+ module.h vm-flags.h elf.h host_ldt.h
SYMLINK_HEADERS := $(foreach header,$(SYMLINK_HEADERS),include/asm-um/$(header))
# XXX: The "os" symlink is only used by arch/um/include/os.h, which includes
@@ -129,7 +129,7 @@ CPPFLAGS_vmlinux.lds = -U$(SUBARCH) \
-DSTART=$(START) -DELF_ARCH=$(ELF_ARCH) \
-DELF_FORMAT="$(ELF_FORMAT)" $(CPP_MODE-y) \
-DKERNEL_STACK_SIZE=$(STACK_SIZE) \
- -DUNMAP_PATH=arch/um/sys-$(SUBARCH)/unmap_fin.o
+ -DUNMAP_PATH=arch/um/sys-$(SUBARCH)/unmap.o
#The wrappers will select whether using "malloc" or the kernel allocator.
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
@@ -150,8 +150,7 @@ CLEAN_FILES += linux x.i gmon.out $(ARCH_DIR)/include/uml-config.h \
$(ARCH_DIR)/include/user_constants.h \
$(ARCH_DIR)/include/kern_constants.h $(ARCH_DIR)/Kconfig.arch
-MRPROPER_FILES += $(SYMLINK_HEADERS) $(ARCH_SYMLINKS) \
- $(addprefix $(ARCH_DIR)/kernel/,$(KERN_SYMLINKS)) $(ARCH_DIR)/os
+MRPROPER_FILES += $(ARCH_SYMLINKS)
archclean:
@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
diff --git a/arch/um/Makefile-x86_64 b/arch/um/Makefile-x86_64
index 38df311e75dc4..dfd88b652fbe5 100644
--- a/arch/um/Makefile-x86_64
+++ b/arch/um/Makefile-x86_64
@@ -1,7 +1,7 @@
# Copyright 2003 - 2004 Pathscale, Inc
# Released under the GPL
-libs-y += arch/um/sys-x86_64/
+core-y += arch/um/sys-x86_64/
START := 0x60000000
#We #undef __x86_64__ for kernelspace, not for userspace where
diff --git a/arch/um/drivers/daemon_kern.c b/arch/um/drivers/daemon_kern.c
index a61b7b46bc025..53d09ed78b425 100644
--- a/arch/um/drivers/daemon_kern.c
+++ b/arch/um/drivers/daemon_kern.c
@@ -95,18 +95,7 @@ static struct transport daemon_transport = {
static int register_daemon(void)
{
register_transport(&daemon_transport);
- return(1);
+ return 0;
}
__initcall(register_daemon);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index 49acb2badf322..d18a974735e64 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -104,7 +104,7 @@ static int harddog_release(struct inode *inode, struct file *file)
extern int ping_watchdog(int fd);
-static ssize_t harddog_write(struct file *file, const char *data, size_t len,
+static ssize_t harddog_write(struct file *file, const char __user *data, size_t len,
loff_t *ppos)
{
/*
@@ -118,6 +118,7 @@ static ssize_t harddog_write(struct file *file, const char *data, size_t len,
static int harddog_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
+ void __user *argp= (void __user *)arg;
static struct watchdog_info ident = {
WDIOC_SETTIMEOUT,
0,
@@ -127,13 +128,12 @@ static int harddog_ioctl(struct inode *inode, struct file *file,
default:
return -ENOTTY;
case WDIOC_GETSUPPORT:
- if(copy_to_user((struct harddog_info *)arg, &ident,
- sizeof(ident)))
+ if(copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
return 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
- return put_user(0,(int *)arg);
+ return put_user(0,(int __user *)argp);
case WDIOC_KEEPALIVE:
return(ping_watchdog(harddog_out_fd));
}
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index 59602b81b2402..37232f908cd74 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -67,8 +67,8 @@ MODULE_PARM_DESC(mixer, MIXER_HELP);
/* /dev/dsp file operations */
-static ssize_t hostaudio_read(struct file *file, char *buffer, size_t count,
- loff_t *ppos)
+static ssize_t hostaudio_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
{
struct hostaudio_state *state = file->private_data;
void *kbuf;
@@ -94,7 +94,7 @@ static ssize_t hostaudio_read(struct file *file, char *buffer, size_t count,
return(err);
}
-static ssize_t hostaudio_write(struct file *file, const char *buffer,
+static ssize_t hostaudio_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
struct hostaudio_state *state = file->private_data;
@@ -152,7 +152,7 @@ static int hostaudio_ioctl(struct inode *inode, struct file *file,
case SNDCTL_DSP_CHANNELS:
case SNDCTL_DSP_SUBDIVIDE:
case SNDCTL_DSP_SETFRAGMENT:
- if(get_user(data, (int *) arg))
+ if(get_user(data, (int __user *) arg))
return(-EFAULT);
break;
default:
@@ -168,7 +168,7 @@ static int hostaudio_ioctl(struct inode *inode, struct file *file,
case SNDCTL_DSP_CHANNELS:
case SNDCTL_DSP_SUBDIVIDE:
case SNDCTL_DSP_SETFRAGMENT:
- if(put_user(data, (int *) arg))
+ if(put_user(data, (int __user *) arg))
return(-EFAULT);
break;
default:
diff --git a/arch/um/drivers/mcast_kern.c b/arch/um/drivers/mcast_kern.c
index c9b078fba03e4..3a7af18cf9442 100644
--- a/arch/um/drivers/mcast_kern.c
+++ b/arch/um/drivers/mcast_kern.c
@@ -124,18 +124,7 @@ static struct transport mcast_transport = {
static int register_mcast(void)
{
register_transport(&mcast_transport);
- return(1);
+ return 0;
}
__initcall(register_mcast);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 1488816588ea5..28e3760e8b981 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -20,6 +20,8 @@
#include "linux/namei.h"
#include "linux/proc_fs.h"
#include "linux/syscalls.h"
+#include "linux/list.h"
+#include "linux/mm.h"
#include "linux/console.h"
#include "asm/irq.h"
#include "asm/uaccess.h"
@@ -347,6 +349,142 @@ static struct mc_device *mconsole_find_dev(char *name)
return(NULL);
}
+#define UNPLUGGED_PER_PAGE \
+ ((PAGE_SIZE - sizeof(struct list_head)) / sizeof(unsigned long))
+
+struct unplugged_pages {
+ struct list_head list;
+ void *pages[UNPLUGGED_PER_PAGE];
+};
+
+static unsigned long long unplugged_pages_count = 0;
+static struct list_head unplugged_pages = LIST_HEAD_INIT(unplugged_pages);
+static int unplug_index = UNPLUGGED_PER_PAGE;
+
+static int mem_config(char *str)
+{
+ unsigned long long diff;
+ int err = -EINVAL, i, add;
+ char *ret;
+
+ if(str[0] != '=')
+ goto out;
+
+ str++;
+ if(str[0] == '-')
+ add = 0;
+ else if(str[0] == '+'){
+ add = 1;
+ }
+ else goto out;
+
+ str++;
+ diff = memparse(str, &ret);
+ if(*ret != '\0')
+ goto out;
+
+ diff /= PAGE_SIZE;
+
+ for(i = 0; i < diff; i++){
+ struct unplugged_pages *unplugged;
+ void *addr;
+
+ if(add){
+ if(list_empty(&unplugged_pages))
+ break;
+
+ unplugged = list_entry(unplugged_pages.next,
+ struct unplugged_pages, list);
+ if(unplug_index > 0)
+ addr = unplugged->pages[--unplug_index];
+ else {
+ list_del(&unplugged->list);
+ addr = unplugged;
+ unplug_index = UNPLUGGED_PER_PAGE;
+ }
+
+ free_page((unsigned long) addr);
+ unplugged_pages_count--;
+ }
+ else {
+ struct page *page;
+
+ page = alloc_page(GFP_ATOMIC);
+ if(page == NULL)
+ break;
+
+ unplugged = page_address(page);
+ if(unplug_index == UNPLUGGED_PER_PAGE){
+ INIT_LIST_HEAD(&unplugged->list);
+ list_add(&unplugged->list, &unplugged_pages);
+ unplug_index = 0;
+ }
+ else {
+ struct list_head *entry = unplugged_pages.next;
+ addr = unplugged;
+
+ unplugged = list_entry(entry,
+ struct unplugged_pages,
+ list);
+ unplugged->pages[unplug_index++] = addr;
+ err = os_drop_memory(addr, PAGE_SIZE);
+ if(err)
+ printk("Failed to release memory - "
+ "errno = %d\n", err);
+ }
+
+ unplugged_pages_count++;
+ }
+ }
+
+ err = 0;
+out:
+ return err;
+}
+
+static int mem_get_config(char *name, char *str, int size, char **error_out)
+{
+ char buf[sizeof("18446744073709551615")];
+ int len = 0;
+
+ sprintf(buf, "%ld", uml_physmem);
+ CONFIG_CHUNK(str, size, len, buf, 1);
+
+ return len;
+}
+
+static int mem_id(char **str, int *start_out, int *end_out)
+{
+ *start_out = 0;
+ *end_out = 0;
+
+ return 0;
+}
+
+static int mem_remove(int n)
+{
+ return -EBUSY;
+}
+
+static struct mc_device mem_mc = {
+ .name = "mem",
+ .config = mem_config,
+ .get_config = mem_get_config,
+ .id = mem_id,
+ .remove = mem_remove,
+};
+
+static int mem_mc_init(void)
+{
+ if(can_drop_memory())
+ mconsole_register_dev(&mem_mc);
+ else printk("Can't release memory to the host - memory hotplug won't "
+ "be supported\n");
+ return 0;
+}
+
+__initcall(mem_mc_init);
+
#define CONFIG_BUF_SIZE 64
static void mconsole_get_config(int (*get_config)(char *, char *, int,
@@ -478,7 +616,7 @@ static void console_write(struct console *console, const char *string,
return;
while(1){
- n = min(len, ARRAY_SIZE(console_buf) - console_index);
+ n = min((size_t)len, ARRAY_SIZE(console_buf) - console_index);
strncpy(&console_buf[console_index], string, n);
console_index += n;
string += n;
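/* Back-of-the-envelope check of UNPLUGGED_PER_PAGE above (assumed values,
 * not from the patch): with a 4096-byte page and a 16-byte list_head on a
 * 64-bit host, (4096 - 16) / 8 = 510 page pointers fit in each tracking
 * page; a 32-bit host would get (4096 - 8) / 4 = 1022.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;			/* assumed PAGE_SIZE */
	unsigned long list_head = 2 * sizeof(void *);	/* prev + next */

	printf("UNPLUGGED_PER_PAGE = %lu\n",
	       (page_size - list_head) / sizeof(unsigned long));
	return 0;
}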
diff --git a/arch/um/drivers/pcap_kern.c b/arch/um/drivers/pcap_kern.c
index 07c80f2156ef1..466ff2c2f9183 100644
--- a/arch/um/drivers/pcap_kern.c
+++ b/arch/um/drivers/pcap_kern.c
@@ -106,18 +106,7 @@ static struct transport pcap_transport = {
static int register_pcap(void)
{
register_transport(&pcap_transport);
- return(1);
+ return 0;
}
__initcall(register_pcap);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c
index a62f5ef445cfe..163ee0d5f75e4 100644
--- a/arch/um/drivers/slip_kern.c
+++ b/arch/um/drivers/slip_kern.c
@@ -93,18 +93,7 @@ static struct transport slip_transport = {
static int register_slip(void)
{
register_transport(&slip_transport);
- return(1);
+ return 0;
}
__initcall(register_slip);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c
index 33d7982be5d3a..95e50c943e14f 100644
--- a/arch/um/drivers/slirp_kern.c
+++ b/arch/um/drivers/slirp_kern.c
@@ -77,7 +77,7 @@ static int slirp_setup(char *str, char **mac_out, void *data)
int i=0;
*init = ((struct slirp_init)
- { argw : { { "slirp", NULL } } });
+ { .argw = { { "slirp", NULL } } });
str = split_if_spec(str, mac_out, NULL);
@@ -116,18 +116,7 @@ static struct transport slirp_transport = {
static int register_slirp(void)
{
register_transport(&slirp_transport);
- return(1);
+ return 0;
}
__initcall(register_slirp);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 0336575d24489..0897852b09a38 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -891,7 +891,7 @@ int ubd_driver_init(void){
SA_INTERRUPT, "ubd", ubd_dev);
if(err != 0)
printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err);
- return(err);
+ return 0;
}
device_initcall(ubd_driver_init);
diff --git a/arch/um/include/kern_util.h b/arch/um/include/kern_util.h
index 07176d92e1c92..42557130a4081 100644
--- a/arch/um/include/kern_util.h
+++ b/arch/um/include/kern_util.h
@@ -116,7 +116,11 @@ extern void *get_current(void);
extern struct task_struct *get_task(int pid, int require);
extern void machine_halt(void);
extern int is_syscall(unsigned long addr);
-extern void arch_switch(void);
+
+extern void arch_switch_to_tt(struct task_struct *from, struct task_struct *to);
+
+extern void arch_switch_to_skas(struct task_struct *from, struct task_struct *to);
+
extern void free_irq(unsigned int, void *);
extern int cpu(void);
diff --git a/arch/um/include/line.h b/arch/um/include/line.h
index 6f4d680dc1d4a..6ac0f8252e21e 100644
--- a/arch/um/include/line.h
+++ b/arch/um/include/line.h
@@ -58,23 +58,17 @@ struct line {
};
#define LINE_INIT(str, d) \
- { init_str : str, \
- init_pri : INIT_STATIC, \
- valid : 1, \
- throttled : 0, \
- lock : SPIN_LOCK_UNLOCKED, \
- buffer : NULL, \
- head : NULL, \
- tail : NULL, \
- sigio : 0, \
- driver : d, \
- have_irq : 0 }
+ { .init_str = str, \
+ .init_pri = INIT_STATIC, \
+ .valid = 1, \
+ .lock = SPIN_LOCK_UNLOCKED, \
+ .driver = d }
struct lines {
int num;
};
-#define LINES_INIT(n) { num : n }
+#define LINES_INIT(n) { .num = n }
extern void line_close(struct tty_struct *tty, struct file * filp);
extern int line_open(struct line *lines, struct tty_struct *tty);
diff --git a/arch/um/include/mem_user.h b/arch/um/include/mem_user.h
index a1064c5823bfe..a54514d2cc3ab 100644
--- a/arch/um/include/mem_user.h
+++ b/arch/um/include/mem_user.h
@@ -49,7 +49,6 @@ extern int iomem_size;
extern unsigned long host_task_size;
extern unsigned long task_size;
-extern void check_devanon(void);
extern int init_mem_user(void);
extern void setup_memory(void *entry);
extern unsigned long find_iomem(char *driver, unsigned long *len_out);
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index d3d1bc6074ef4..f88856c28a66e 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -13,6 +13,7 @@
#include "kern_util.h"
#include "skas/mm_id.h"
#include "irq_user.h"
+#include "sysdep/tls.h"
#define OS_TYPE_FILE 1
#define OS_TYPE_DIR 2
@@ -172,6 +173,7 @@ extern int os_fchange_dir(int fd);
extern void os_early_checks(void);
extern int can_do_skas(void);
extern void os_check_bugs(void);
+extern void check_host_supports_tls(int *supports_tls, int *tls_min);
/* Make sure they are clear when running in TT mode. Required by
* SEGV_MAYBE_FIXABLE */
@@ -205,6 +207,8 @@ extern int os_map_memory(void *virt, int fd, unsigned long long off,
extern int os_protect_memory(void *addr, unsigned long len,
int r, int w, int x);
extern int os_unmap_memory(void *addr, int len);
+extern int os_drop_memory(void *addr, int length);
+extern int can_drop_memory(void);
extern void os_flush_stdout(void);
/* tt.c
@@ -234,8 +238,12 @@ extern int run_helper_thread(int (*proc)(void *), void *arg,
int stack_order);
extern int helper_wait(int pid);
-/* umid.c */
+/* tls.c */
+extern int os_set_thread_area(user_desc_t *info, int pid);
+extern int os_get_thread_area(user_desc_t *info, int pid);
+
+/* umid.c */
extern int umid_file_name(char *name, char *buf, int len);
extern int set_umid(char *name);
extern char *get_umid(void);
diff --git a/arch/um/include/sysdep-i386/checksum.h b/arch/um/include/sysdep-i386/checksum.h
index 7d3d202d7fff5..052bb061a9781 100644
--- a/arch/um/include/sysdep-i386/checksum.h
+++ b/arch/um/include/sysdep-i386/checksum.h
@@ -48,7 +48,8 @@ unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *
*/
static __inline__
-unsigned int csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
+unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
+ unsigned char *dst,
int len, int sum, int *err_ptr)
{
if(copy_from_user(dst, src, len)){
@@ -192,7 +193,7 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
*/
#define HAVE_CSUM_COPY_USER
static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src,
- unsigned char *dst,
+ unsigned char __user *dst,
int len, int sum, int *err_ptr)
{
if (access_ok(VERIFY_WRITE, dst, len)){
diff --git a/arch/um/include/sysdep-i386/ptrace.h b/arch/um/include/sysdep-i386/ptrace.h
index c8ee9559f3ab2..6670cc992ecb5 100644
--- a/arch/um/include/sysdep-i386/ptrace.h
+++ b/arch/um/include/sysdep-i386/ptrace.h
@@ -14,7 +14,12 @@
#define MAX_REG_NR (UM_FRAME_SIZE / sizeof(unsigned long))
#define MAX_REG_OFFSET (UM_FRAME_SIZE)
+#ifdef UML_CONFIG_PT_PROXY
extern void update_debugregs(int seq);
+#else
+static inline void update_debugregs(int seq) {}
+#endif
+
/* syscall emulation path in ptrace */
diff --git a/arch/um/include/sysdep-i386/tls.h b/arch/um/include/sysdep-i386/tls.h
new file mode 100644
index 0000000000000..918fd3c5ff9ca
--- /dev/null
+++ b/arch/um/include/sysdep-i386/tls.h
@@ -0,0 +1,32 @@
+#ifndef _SYSDEP_TLS_H
+#define _SYSDEP_TLS_H
+
+# ifndef __KERNEL__
+
+/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
+ * may be named user_desc (but in 2.4, and in the header matching its API, it
+ * was named modify_ldt_ldt_s). */
+
+typedef struct um_dup_user_desc {
+ unsigned int entry_number;
+ unsigned int base_addr;
+ unsigned int limit;
+ unsigned int seg_32bit:1;
+ unsigned int contents:2;
+ unsigned int read_exec_only:1;
+ unsigned int limit_in_pages:1;
+ unsigned int seg_not_present:1;
+ unsigned int useable:1;
+} user_desc_t;
+
+# else /* __KERNEL__ */
+
+# include <asm/ldt.h>
+typedef struct user_desc user_desc_t;
+
+# endif /* __KERNEL__ */
+
+#define GDT_ENTRY_TLS_MIN_I386 6
+#define GDT_ENTRY_TLS_MIN_X86_64 12
+
+#endif /* _SYSDEP_TLS_H */
diff --git a/arch/um/include/sysdep-x86_64/tls.h b/arch/um/include/sysdep-x86_64/tls.h
new file mode 100644
index 0000000000000..35f19f25bd3b7
--- /dev/null
+++ b/arch/um/include/sysdep-x86_64/tls.h
@@ -0,0 +1,29 @@
+#ifndef _SYSDEP_TLS_H
+#define _SYSDEP_TLS_H
+
+# ifndef __KERNEL__
+
+/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
+ * may be named user_desc (but in 2.4, and in the header matching its API, it
+ * was named modify_ldt_ldt_s). */
+
+typedef struct um_dup_user_desc {
+ unsigned int entry_number;
+ unsigned int base_addr;
+ unsigned int limit;
+ unsigned int seg_32bit:1;
+ unsigned int contents:2;
+ unsigned int read_exec_only:1;
+ unsigned int limit_in_pages:1;
+ unsigned int seg_not_present:1;
+ unsigned int useable:1;
+ unsigned int lm:1;
+} user_desc_t;
+
+# else /* __KERNEL__ */
+
+# include <asm/ldt.h>
+typedef struct user_desc user_desc_t;
+
+# endif /* __KERNEL__ */
+#endif /* _SYSDEP_TLS_H */
diff --git a/arch/um/include/user_util.h b/arch/um/include/user_util.h
index 992a7e1e0fca9..fe0c29b5144db 100644
--- a/arch/um/include/user_util.h
+++ b/arch/um/include/user_util.h
@@ -8,6 +8,9 @@
#include "sysdep/ptrace.h"
+/* Copied from kernel.h */
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
#define CATCH_EINTR(expr) while ((errno = 0, ((expr) < 0)) && (errno == EINTR))
extern int mode_tt;
@@ -31,7 +34,7 @@ extern unsigned long uml_physmem;
extern unsigned long uml_reserved;
extern unsigned long end_vm;
extern unsigned long start_vm;
-extern unsigned long highmem;
+extern unsigned long long highmem;
extern char host_info[];
diff --git a/arch/um/kernel/exec_kern.c b/arch/um/kernel/exec_kern.c
index 1ca84319317d2..c0cb627bf594b 100644
--- a/arch/um/kernel/exec_kern.c
+++ b/arch/um/kernel/exec_kern.c
@@ -22,6 +22,7 @@
void flush_thread(void)
{
+ arch_flush_thread(&current->thread.arch);
CHOOSE_MODE(flush_thread_tt(), flush_thread_skas());
}
@@ -58,14 +59,14 @@ long um_execve(char *file, char __user *__user *argv, char __user *__user *env)
return(err);
}
-long sys_execve(char *file, char __user *__user *argv,
+long sys_execve(char __user *file, char __user *__user *argv,
char __user *__user *env)
{
long error;
char *filename;
lock_kernel();
- filename = getname((char __user *) file);
+ filename = getname(file);
error = PTR_ERR(filename);
if (IS_ERR(filename)) goto out;
error = execve1(filename, argv, env);
@@ -74,14 +75,3 @@ long sys_execve(char *file, char __user *__user *argv,
unlock_kernel();
return(error);
}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 92cce96b5e24d..44e41a35f000c 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -30,7 +30,7 @@ extern char __binary_start;
unsigned long *empty_zero_page = NULL;
unsigned long *empty_bad_page = NULL;
pgd_t swapper_pg_dir[PTRS_PER_PGD];
-unsigned long highmem;
+unsigned long long highmem;
int kmalloc_ok = 0;
static unsigned long brk_end;
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
index 3113cab8675e6..f6a5a502120be 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process_kern.c
@@ -156,9 +156,25 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
unsigned long stack_top, struct task_struct * p,
struct pt_regs *regs)
{
+ int ret;
+
p->thread = (struct thread_struct) INIT_THREAD;
- return(CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
- clone_flags, sp, stack_top, p, regs));
+ ret = CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
+ clone_flags, sp, stack_top, p, regs);
+
+ if (ret || !current->thread.forking)
+ goto out;
+
+ clear_flushed_tls(p);
+
+ /*
+ * Set a new TLS for the child thread?
+ */
+ if (clone_flags & CLONE_SETTLS)
+ ret = arch_copy_tls(p);
+
+out:
+ return ret;
}
void initial_thread_cb(void (*proc)(void *), void *arg)
@@ -185,10 +201,6 @@ void default_idle(void)
{
CHOOSE_MODE(uml_idle_timer(), (void) 0);
- atomic_inc(&init_mm.mm_count);
- current->mm = &init_mm;
- current->active_mm = &init_mm;
-
while(1){
/* endless idle loop with no priority at all */
@@ -407,7 +419,7 @@ static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int
return strlen(buf);
}
-static int proc_write_sysemu(struct file *file,const char *buf, unsigned long count,void *data)
+static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
{
char tmp[2];
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 98e09395c093c..60d2eda995c17 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -46,6 +46,7 @@ extern int poke_user(struct task_struct * child, long addr, long data);
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
int i, ret;
+ unsigned long __user *p = (void __user *)(unsigned long)data;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
@@ -58,7 +59,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
if (copied != sizeof(tmp))
break;
- ret = put_user(tmp, (unsigned long __user *) data);
+ ret = put_user(tmp, p);
break;
}
@@ -136,15 +137,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_GETREGS
case PTRACE_GETREGS: { /* Get all gp regs from the child. */
- if (!access_ok(VERIFY_WRITE, (unsigned long *)data,
- MAX_REG_OFFSET)) {
+ if (!access_ok(VERIFY_WRITE, p, MAX_REG_OFFSET)) {
ret = -EIO;
break;
}
for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
- __put_user(getreg(child, i),
- (unsigned long __user *) data);
- data += sizeof(long);
+ __put_user(getreg(child, i), p);
+ p++;
}
ret = 0;
break;
@@ -153,15 +152,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_SETREGS
case PTRACE_SETREGS: { /* Set all gp regs in the child. */
unsigned long tmp = 0;
- if (!access_ok(VERIFY_READ, (unsigned *)data,
- MAX_REG_OFFSET)) {
+ if (!access_ok(VERIFY_READ, p, MAX_REG_OFFSET)) {
ret = -EIO;
break;
}
for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
- __get_user(tmp, (unsigned long __user *) data);
+ __get_user(tmp, p);
putreg(child, i, tmp);
- data += sizeof(long);
+ p++;
}
ret = 0;
break;
@@ -187,14 +185,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = set_fpxregs(data, child);
break;
#endif
+ case PTRACE_GET_THREAD_AREA:
+ ret = ptrace_get_thread_area(child, addr,
+ (struct user_desc __user *) data);
+ break;
+
+ case PTRACE_SET_THREAD_AREA:
+ ret = ptrace_set_thread_area(child, addr,
+ (struct user_desc __user *) data);
+ break;
+
case PTRACE_FAULTINFO: {
- /* Take the info from thread->arch->faultinfo,
- * but transfer max. sizeof(struct ptrace_faultinfo).
- * On i386, ptrace_faultinfo is smaller!
- */
- ret = copy_to_user((unsigned long __user *) data,
- &child->thread.arch.faultinfo,
- sizeof(struct ptrace_faultinfo));
+ /* Take the info from thread->arch->faultinfo,
+ * but transfer max. sizeof(struct ptrace_faultinfo).
+ * On i386, ptrace_faultinfo is smaller!
+ */
+ ret = copy_to_user(p, &child->thread.arch.faultinfo,
+ sizeof(struct ptrace_faultinfo));
if(ret)
break;
break;
@@ -204,8 +211,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_LDT: {
struct ptrace_ldt ldt;
- if(copy_from_user(&ldt, (unsigned long __user *) data,
- sizeof(ldt))){
+ if(copy_from_user(&ldt, p, sizeof(ldt))){
ret = -EIO;
break;
}
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index 3f70a2e12f067..2135eaf98a938 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -35,6 +35,8 @@ void switch_to_skas(void *prev, void *next)
switch_threads(&from->thread.mode.skas.switch_buf,
to->thread.mode.skas.switch_buf);
+ arch_switch_to_skas(current->thread.prev_sched, current);
+
if(current->pid == 0)
switch_timers(1);
}
@@ -89,10 +91,17 @@ void fork_handler(int sig)
panic("blech");
schedule_tail(current->thread.prev_sched);
+
+ /* XXX: if interrupt_end() calls schedule, this call to
+ * arch_switch_to_skas isn't needed. We might want to do this to
+ * improve performance. -bb */
+ arch_switch_to_skas(current->thread.prev_sched, current);
+
current->thread.prev_sched = NULL;
/* Handle any immediate reschedules or signals */
interrupt_end();
+
userspace(&current->thread.regs.regs);
}
@@ -109,6 +118,8 @@ int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;
handler = fork_handler;
+
+ arch_copy_thread(&current->thread.arch, &p->thread.arch);
}
else {
init_thread_registers(&p->thread.regs.regs);
diff --git a/arch/um/kernel/syscall_kern.c b/arch/um/kernel/syscall_kern.c
index 8e1a3501ff463..37d3978337d87 100644
--- a/arch/um/kernel/syscall_kern.c
+++ b/arch/um/kernel/syscall_kern.c
@@ -104,7 +104,7 @@ long sys_pipe(unsigned long __user * fildes)
}
-long sys_uname(struct old_utsname * name)
+long sys_uname(struct old_utsname __user * name)
{
long err;
if (!name)
@@ -115,7 +115,7 @@ long sys_uname(struct old_utsname * name)
return err?-EFAULT:0;
}
-long sys_olduname(struct oldold_utsname * name)
+long sys_olduname(struct oldold_utsname __user * name)
{
long error;
diff --git a/arch/um/kernel/trap_kern.c b/arch/um/kernel/trap_kern.c
index d56046c2aba2f..02f6d4d8dc3af 100644
--- a/arch/um/kernel/trap_kern.c
+++ b/arch/um/kernel/trap_kern.c
@@ -198,7 +198,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRERR;
- si.si_addr = (void *)address;
+ si.si_addr = (void __user *)address;
current->thread.arch.faultinfo = fi;
force_sig_info(SIGBUS, &si, current);
} else if (err == -ENOMEM) {
@@ -207,7 +207,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
} else {
BUG_ON(err != -EFAULT);
si.si_signo = SIGSEGV;
- si.si_addr = (void *) address;
+ si.si_addr = (void __user *) address;
current->thread.arch.faultinfo = fi;
force_sig_info(SIGSEGV, &si, current);
}
@@ -220,8 +220,8 @@ void bad_segv(struct faultinfo fi, unsigned long ip)
si.si_signo = SIGSEGV;
si.si_code = SEGV_ACCERR;
- si.si_addr = (void *) FAULT_ADDRESS(fi);
- current->thread.arch.faultinfo = fi;
+ si.si_addr = (void __user *) FAULT_ADDRESS(fi);
+ current->thread.arch.faultinfo = fi;
force_sig_info(SIGSEGV, &si, current);
}
diff --git a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c
index 295c1ac817b34..a9c1443fc5481 100644
--- a/arch/um/kernel/tt/process_kern.c
+++ b/arch/um/kernel/tt/process_kern.c
@@ -51,6 +51,13 @@ void switch_to_tt(void *prev, void *next)
c = 0;
+ /* Notice that here we "up" the semaphore on which "to" is waiting, and
+ * below (the read) we wait on this semaphore (which is implemented by
+ * switch_pipe) and go to sleep. Thus, after that, we have resumed in
+ * "to", and can no longer use the value of "from" (which is outdated),
+ * nor the value of "to" (since it was the task that took the CPU from
+ * us, which we don't care about). */
+
err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
if(err != sizeof(c))
panic("write of switch_pipe failed, err = %d", -err);
@@ -77,7 +84,7 @@ void switch_to_tt(void *prev, void *next)
change_sig(SIGALRM, alrm);
change_sig(SIGPROF, prof);
- arch_switch();
+ arch_switch_to_tt(prev_sched, current);
flush_tlb_all();
local_irq_restore(flags);
@@ -141,7 +148,6 @@ static void new_thread_handler(int sig)
set_cmdline("(kernel thread)");
change_sig(SIGUSR1, 1);
- change_sig(SIGVTALRM, 1);
change_sig(SIGPROF, 1);
local_irq_enable();
if(!run_kernel_thread(fn, arg, &current->thread.exec_buf))
diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile
index 1659386b42bbf..f4bfc4c7ccac2 100644
--- a/arch/um/os-Linux/Makefile
+++ b/arch/um/os-Linux/Makefile
@@ -4,7 +4,7 @@
#
obj-y = aio.o elf_aux.o file.o helper.o irq.o main.o mem.o process.o sigio.o \
- signal.o start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o \
+ signal.o start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o tls.o \
user_syms.o util.o drivers/ sys-$(SUBARCH)/
obj-$(CONFIG_MODE_SKAS) += skas/
@@ -12,12 +12,9 @@ obj-$(CONFIG_TTY_LOG) += tty_log.o
user-objs-$(CONFIG_TTY_LOG) += tty_log.o
USER_OBJS := $(user-objs-y) aio.o elf_aux.o file.o helper.o irq.o main.o mem.o \
- process.o sigio.o signal.o start_up.o time.o trap.o tt.o tty.o \
+ process.o sigio.o signal.o start_up.o time.o trap.o tt.o tty.o tls.o \
uaccess.o umid.o util.o
-elf_aux.o: $(ARCH_DIR)/kernel-offsets.h
-CFLAGS_elf_aux.o += -I$(objtree)/arch/um
-
CFLAGS_user_syms.o += -DSUBARCH_$(SUBARCH)
HAVE_AIO_ABI := $(shell [ -r /usr/include/linux/aio_abi.h ] && \
diff --git a/arch/um/os-Linux/drivers/ethertap_kern.c b/arch/um/os-Linux/drivers/ethertap_kern.c
index 6ae4b19d9f50e..768606bec2332 100644
--- a/arch/um/os-Linux/drivers/ethertap_kern.c
+++ b/arch/um/os-Linux/drivers/ethertap_kern.c
@@ -102,18 +102,7 @@ static struct transport ethertap_transport = {
static int register_ethertap(void)
{
register_transport(&ethertap_transport);
- return(1);
+ return 0;
}
__initcall(register_ethertap);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/os-Linux/drivers/tuntap_kern.c b/arch/um/os-Linux/drivers/tuntap_kern.c
index 4202b9ebad4c0..190009a6f89cd 100644
--- a/arch/um/os-Linux/drivers/tuntap_kern.c
+++ b/arch/um/os-Linux/drivers/tuntap_kern.c
@@ -87,18 +87,7 @@ static struct transport tuntap_transport = {
static int register_tuntap(void)
{
register_transport(&tuntap_transport);
- return(1);
+ return 0;
}
__initcall(register_tuntap);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index 9d7d69a523bb5..6ab372da96579 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -121,36 +121,11 @@ int create_tmp_file(unsigned long long len)
return(fd);
}
-static int create_anon_file(unsigned long long len)
-{
- void *addr;
- int fd;
-
- fd = open("/dev/anon", O_RDWR);
- if(fd < 0) {
- perror("opening /dev/anon");
- exit(1);
- }
-
- addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
- if(addr == MAP_FAILED){
- perror("mapping physmem file");
- exit(1);
- }
- munmap(addr, len);
-
- return(fd);
-}
-
-extern int have_devanon;
-
int create_mem_file(unsigned long long len)
{
int err, fd;
- if(have_devanon)
- fd = create_anon_file(len);
- else fd = create_tmp_file(len);
+ fd = create_tmp_file(len);
err = os_set_exec_close(fd, 1);
if(err < 0){
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index d261888f39c43..8176b0b520470 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -11,6 +11,7 @@
#include <linux/unistd.h>
#include <sys/mman.h>
#include <sys/wait.h>
+#include <sys/mman.h>
#include "ptrace_user.h"
#include "os.h"
#include "user.h"
@@ -20,6 +21,7 @@
#include "kern_util.h"
#include "longjmp.h"
#include "skas_ptrace.h"
+#include "kern_constants.h"
#define ARBITRARY_ADDR -1
#define FAILURE_PID -1
@@ -187,6 +189,48 @@ int os_unmap_memory(void *addr, int len)
return(0);
}
+#ifndef MADV_REMOVE
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
+#endif
+
+int os_drop_memory(void *addr, int length)
+{
+ int err;
+
+ err = madvise(addr, length, MADV_REMOVE);
+ if(err < 0)
+ err = -errno;
+ return err;
+}
+
+int can_drop_memory(void)
+{
+ void *addr;
+ int fd;
+
+ printk("Checking host MADV_REMOVE support...");
+ fd = create_mem_file(UM_KERN_PAGE_SIZE);
+ if(fd < 0){
+ printk("Creating test memory file failed, err = %d\n", -fd);
+ return 0;
+ }
+
+ addr = mmap64(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE, fd, 0);
+ if(addr == MAP_FAILED){
+ printk("Mapping test memory file failed, err = %d\n", -errno);
+ return 0;
+ }
+
+ if(madvise(addr, UM_KERN_PAGE_SIZE, MADV_REMOVE) != 0){
+ printk("MADV_REMOVE failed, err = %d\n", -errno);
+ return 0;
+ }
+
+ printk("OK\n");
+ return 1;
+}
+
void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int))
{
int flags = 0, pages;
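/* Standalone sketch of the same MADV_REMOVE probe (assumptions: a tmpfs or
 * hole-punch-capable filesystem backs the temporary file; this is not code
 * from the patch).
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int probe_madv_remove(void)
{
	long page = sysconf(_SC_PAGESIZE);
	FILE *f = tmpfile();

	if (f == NULL || ftruncate(fileno(f), page) != 0)
		return 0;

	void *addr = mmap(NULL, page, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fileno(f), 0);
	if (addr == MAP_FAILED)
		return 0;

	if (madvise(addr, page, MADV_REMOVE) != 0) {
		fprintf(stderr, "MADV_REMOVE not usable: %s\n",
			strerror(errno));
		return 0;
	}
	return 1;
}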
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 32753131f8d84..387e26af301a5 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -470,25 +470,6 @@ int can_do_skas(void)
}
#endif
-int have_devanon = 0;
-
-/* Runs on boot kernel stack - already safe to use printk. */
-
-void check_devanon(void)
-{
- int fd;
-
- printk("Checking for /dev/anon on the host...");
- fd = open("/dev/anon", O_RDWR);
- if(fd < 0){
- printk("Not available (open failed with errno %d)\n", errno);
- return;
- }
-
- printk("OK\n");
- have_devanon = 1;
-}
-
int __init parse_iomem(char *str, int *add)
{
struct iomem_region *new;
@@ -664,6 +645,5 @@ void os_check_bugs(void)
{
check_ptrace();
check_sigio();
- check_devanon();
}
diff --git a/arch/um/os-Linux/sys-i386/Makefile b/arch/um/os-Linux/sys-i386/Makefile
index 340ef26f5944c..b3213613c41ce 100644
--- a/arch/um/os-Linux/sys-i386/Makefile
+++ b/arch/um/os-Linux/sys-i386/Makefile
@@ -3,7 +3,7 @@
# Licensed under the GPL
#
-obj-$(CONFIG_MODE_SKAS) = registers.o
+obj-$(CONFIG_MODE_SKAS) = registers.o tls.o
USER_OBJS := $(obj-y)
diff --git a/arch/um/os-Linux/sys-i386/tls.c b/arch/um/os-Linux/sys-i386/tls.c
new file mode 100644
index 0000000000000..ba21f0e04a2f3
--- /dev/null
+++ b/arch/um/os-Linux/sys-i386/tls.c
@@ -0,0 +1,33 @@
+#include <linux/unistd.h>
+#include "sysdep/tls.h"
+#include "user_util.h"
+
+static _syscall1(int, get_thread_area, user_desc_t *, u_info);
+
+/* Checks whether the host supports TLS, and sets *tls_min according to the
+ * value valid on the host.
+ * i386 hosts have it == 6; x86_64 hosts have it == 12 for i386 emulation. */
+void check_host_supports_tls(int *supports_tls, int *tls_min) {
+ /* Values for x86 and x86_64.*/
+ int val[] = {GDT_ENTRY_TLS_MIN_I386, GDT_ENTRY_TLS_MIN_X86_64};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ user_desc_t info;
+ info.entry_number = val[i];
+
+ if (get_thread_area(&info) == 0) {
+ *tls_min = val[i];
+ *supports_tls = 1;
+ return;
+ } else {
+ if (errno == EINVAL)
+ continue;
+ else if (errno == ENOSYS)
+ *supports_tls = 0;
+ return;
+ }
+ }
+
+ *supports_tls = 0;
+}
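/* Hypothetical standalone version of the probe above (not from the patch):
 * ask the host for GDT entry 6 (i386) and then 12 (x86_64 running 32-bit
 * code); EINVAL means "wrong index, try the next one", ENOSYS means the
 * host has no TLS support at all.
 */
#include <asm/ldt.h>
#include <errno.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int host_tls_min(void)
{
	static const int candidates[] = { 6, 12 };
	unsigned int i;

	for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
		struct user_desc info;

		memset(&info, 0, sizeof(info));
		info.entry_number = candidates[i];
		if (syscall(SYS_get_thread_area, &info) == 0)
			return candidates[i];
		if (errno == ENOSYS)
			break;
	}
	return -1;	/* host TLS unusable */
}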
diff --git a/arch/um/os-Linux/tls.c b/arch/um/os-Linux/tls.c
new file mode 100644
index 0000000000000..9cb09a45546b8
--- /dev/null
+++ b/arch/um/os-Linux/tls.c
@@ -0,0 +1,76 @@
+#include <errno.h>
+#include <sys/ptrace.h>
+#include <asm/ldt.h>
+#include "sysdep/tls.h"
+#include "uml-config.h"
+
+/* TLS support - we basically rely on the host's TLS support. */
+
+/* In TT mode, this should be called only by the tracing thread, and makes sense
+ * only for PTRACE_SET_THREAD_AREA. In SKAS mode, it's used normally.
+ *
+ */
+
+#ifndef PTRACE_GET_THREAD_AREA
+#define PTRACE_GET_THREAD_AREA 25
+#endif
+
+#ifndef PTRACE_SET_THREAD_AREA
+#define PTRACE_SET_THREAD_AREA 26
+#endif
+
+int os_set_thread_area(user_desc_t *info, int pid)
+{
+ int ret;
+
+ ret = ptrace(PTRACE_SET_THREAD_AREA, pid, info->entry_number,
+ (unsigned long) info);
+ if (ret < 0)
+ ret = -errno;
+ return ret;
+}
+
+#ifdef UML_CONFIG_MODE_SKAS
+
+int os_get_thread_area(user_desc_t *info, int pid)
+{
+ int ret;
+
+ ret = ptrace(PTRACE_GET_THREAD_AREA, pid, info->entry_number,
+ (unsigned long) info);
+ if (ret < 0)
+ ret = -errno;
+ return ret;
+}
+
+#endif
+
+#ifdef UML_CONFIG_MODE_TT
+#include "linux/unistd.h"
+
+static _syscall1(int, get_thread_area, user_desc_t *, u_info);
+static _syscall1(int, set_thread_area, user_desc_t *, u_info);
+
+int do_set_thread_area_tt(user_desc_t *info)
+{
+ int ret;
+
+ ret = set_thread_area(info);
+ if (ret < 0) {
+ ret = -errno;
+ }
+ return ret;
+}
+
+int do_get_thread_area_tt(user_desc_t *info)
+{
+ int ret;
+
+ ret = get_thread_area(info);
+ if (ret < 0) {
+ ret = -errno;
+ }
+ return ret;
+}
+
+#endif /* UML_CONFIG_MODE_TT */
diff --git a/arch/um/scripts/Makefile.rules b/arch/um/scripts/Makefile.rules
index 2e41cabd3d93a..b696b451774cf 100644
--- a/arch/um/scripts/Makefile.rules
+++ b/arch/um/scripts/Makefile.rules
@@ -20,25 +20,7 @@ define unprofile
$(patsubst -pg,,$(patsubst -fprofile-arcs -ftest-coverage,,$(1)))
endef
-
-# cmd_make_link checks to see if the $(foo-dir) variable starts with a /. If
-# so, it's considered to be a path relative to $(srcdir) rather than
-# $(srcdir)/arch/$(SUBARCH). This is because x86_64 wants to get ldt.c from
-# arch/um/sys-i386 rather than arch/i386 like the other borrowed files. So,
-# it sets $(ldt.c-dir) to /arch/um/sys-i386.
-quiet_cmd_make_link = SYMLINK $@
-cmd_make_link = rm -f $@; ln -sf $(srctree)$(if $(filter-out /%,$($(notdir $@)-dir)),/arch/$(SUBARCH))/$($(notdir $@)-dir)/$(notdir $@) $@
-
-# this needs to be before the foreach, because targets does not accept
-# complete paths like $(obj)/$(f). To make sure this works, use a := assignment
-# or we will get $(obj)/$(f) in the "targets" value.
-# Also, this forces you to use the := syntax when assigning to targets.
-# Otherwise the line below will cause an infinite loop (if you don't know why,
-# just do it).
-
-targets := $(targets) $(SYMLINKS)
-
-SYMLINKS := $(foreach f,$(SYMLINKS),$(obj)/$(f))
-
-$(SYMLINKS): FORCE
- $(call if_changed,make_link)
+ifdef subarch-obj-y
+obj-y += subarch.o
+subarch-y = $(addprefix ../../$(SUBARCH)/,$(subarch-obj-y))
+endif
diff --git a/arch/um/scripts/Makefile.unmap b/arch/um/scripts/Makefile.unmap
deleted file mode 100644
index b2165188d9421..0000000000000
--- a/arch/um/scripts/Makefile.unmap
+++ /dev/null
@@ -1,22 +0,0 @@
-clean-files += unmap_tmp.o unmap_fin.o unmap.o
-
-ifdef CONFIG_MODE_TT
-
-#Always build unmap_fin.o
-extra-y += unmap_fin.o
-#Do dependency tracking for unmap.o (it will be always built, but won't get the tracking unless we use this).
-targets += unmap.o
-
-#XXX: partially copied from arch/um/scripts/Makefile.rules
-$(obj)/unmap.o: _c_flags = $(call unprofile,$(CFLAGS))
-
-quiet_cmd_wrapld = LD $@
-define cmd_wrapld
- $(LD) $(LDFLAGS) -r -o $(obj)/unmap_tmp.o $< ; \
- $(OBJCOPY) $(UML_OBJCOPYFLAGS) $(obj)/unmap_tmp.o $@ -G switcheroo
-endef
-
-$(obj)/unmap_fin.o : $(obj)/unmap.o FORCE
- $(call if_changed,wrapld)
-
-endif
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index f5fd5b0156d02..98b20b7bba4f9 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -1,23 +1,18 @@
-obj-y := bitops.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
- ptrace_user.o semaphore.o signal.o sigcontext.o syscalls.o sysrq.o \
- sys_call_table.o
+obj-y = bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
+ ptrace_user.o signal.o sigcontext.o syscalls.o sysrq.o \
+ sys_call_table.o tls.o
obj-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
-obj-$(CONFIG_MODULES) += module.o
+subarch-obj-y = lib/bitops.o kernel/semaphore.o
+subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem.o
+subarch-obj-$(CONFIG_MODULES) += kernel/module.o
USER_OBJS := bugs.o ptrace_user.o sigcontext.o fault.o stub_segv.o
-SYMLINKS = bitops.c semaphore.c highmem.c module.c
-
include arch/um/scripts/Makefile.rules
-bitops.c-dir = lib
-semaphore.c-dir = kernel
-highmem.c-dir = mm
-module.c-dir = kernel
-
-$(obj)/stub_segv.o : _c_flags = $(call unprofile,$(CFLAGS))
+extra-$(CONFIG_MODE_TT) += unmap.o
-include arch/um/scripts/Makefile.unmap
+$(obj)/stub_segv.o $(obj)/unmap.o: \
+ _c_flags = $(call unprofile,$(CFLAGS))
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index 8032a105949a0..6028bc7cc01bd 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -15,9 +15,22 @@
#include "sysdep/sigcontext.h"
#include "sysdep/sc.h"
-void arch_switch(void)
+void arch_switch_to_tt(struct task_struct *from, struct task_struct *to)
{
- update_debugregs(current->thread.arch.debugregs_seq);
+ update_debugregs(to->thread.arch.debugregs_seq);
+ arch_switch_tls_tt(from, to);
+}
+
+void arch_switch_to_skas(struct task_struct *from, struct task_struct *to)
+{
+ int err = arch_switch_tls_skas(from, to);
+ if (!err)
+ return;
+
+ if (err != -EINVAL)
+ printk(KERN_WARNING "arch_switch_tls_skas failed, errno %d, not EINVAL\n", -err);
+ else
+ printk(KERN_WARNING "arch_switch_tls_skas failed, errno = EINVAL\n");
}
int is_syscall(unsigned long addr)
@@ -124,22 +137,22 @@ unsigned long getreg(struct task_struct *child, int regno)
int peek_user(struct task_struct *child, long addr, long data)
{
/* read the word at location addr in the USER area. */
- unsigned long tmp;
+ unsigned long tmp;
- if ((addr & 3) || addr < 0)
- return -EIO;
+ if ((addr & 3) || addr < 0)
+ return -EIO;
- tmp = 0; /* Default return condition */
- if(addr < MAX_REG_OFFSET){
- tmp = getreg(child, addr);
- }
- else if((addr >= offsetof(struct user, u_debugreg[0])) &&
- (addr <= offsetof(struct user, u_debugreg[7]))){
- addr -= offsetof(struct user, u_debugreg[0]);
- addr = addr >> 2;
- tmp = child->thread.arch.debugregs[addr];
- }
- return put_user(tmp, (unsigned long *) data);
+ tmp = 0; /* Default return condition */
+ if(addr < MAX_REG_OFFSET){
+ tmp = getreg(child, addr);
+ }
+ else if((addr >= offsetof(struct user, u_debugreg[0])) &&
+ (addr <= offsetof(struct user, u_debugreg[7]))){
+ addr -= offsetof(struct user, u_debugreg[0]);
+ addr = addr >> 2;
+ tmp = child->thread.arch.debugregs[addr];
+ }
+ return put_user(tmp, (unsigned long __user *) data);
}
struct i387_fxsave_struct {
diff --git a/arch/um/sys-i386/ptrace_user.c b/arch/um/sys-i386/ptrace_user.c
index 7c376c95de506..9f3bd8ed78f5b 100644
--- a/arch/um/sys-i386/ptrace_user.c
+++ b/arch/um/sys-i386/ptrace_user.c
@@ -14,6 +14,7 @@
#include "sysdep/thread.h"
#include "user.h"
#include "os.h"
+#include "uml-config.h"
int ptrace_getregs(long pid, unsigned long *regs_out)
{
@@ -43,6 +44,7 @@ int ptrace_setfpregs(long pid, unsigned long *regs)
return 0;
}
+/* All the below stuff is of interest for TT mode only */
static void write_debugregs(int pid, unsigned long *regs)
{
struct user *dummy;
@@ -75,7 +77,6 @@ static void read_debugregs(int pid, unsigned long *regs)
/* Accessed only by the tracing thread */
static unsigned long kernel_debugregs[8] = { [ 0 ... 7 ] = 0 };
-static int debugregs_seq = 0;
void arch_enter_kernel(void *task, int pid)
{
@@ -89,6 +90,11 @@ void arch_leave_kernel(void *task, int pid)
write_debugregs(pid, TASK_DEBUGREGS(task));
}
+#ifdef UML_CONFIG_PT_PROXY
+/* Accessed only by the tracing thread */
+static int debugregs_seq;
+
+/* Only called by the ptrace proxy */
void ptrace_pokeuser(unsigned long addr, unsigned long data)
{
if((addr < offsetof(struct user, u_debugreg[0])) ||
@@ -109,6 +115,7 @@ static void update_debugregs_cb(void *arg)
write_debugregs(pid, kernel_debugregs);
}
+/* Optimized out in its header when not defined */
void update_debugregs(int seq)
{
int me;
@@ -118,6 +125,7 @@ void update_debugregs(int seq)
me = os_getpid();
initial_thread_cb(update_debugregs_cb, &me);
}
+#endif
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c
index 33a40f5ef0d2d..f5d0e1c37ea26 100644
--- a/arch/um/sys-i386/signal.c
+++ b/arch/um/sys-i386/signal.c
@@ -19,7 +19,7 @@
#include "skas.h"
static int copy_sc_from_user_skas(struct pt_regs *regs,
- struct sigcontext *from)
+ struct sigcontext __user *from)
{
struct sigcontext sc;
unsigned long fpregs[HOST_FP_SIZE];
@@ -57,7 +57,7 @@ static int copy_sc_from_user_skas(struct pt_regs *regs,
return(0);
}
-int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
+int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate __user *to_fp,
struct pt_regs *regs, unsigned long sp)
{
struct sigcontext sc;
@@ -92,7 +92,7 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
"errno = %d\n", err);
return(1);
}
- to_fp = (to_fp ? to_fp : (struct _fpstate *) (to + 1));
+ to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1));
sc.fpstate = to_fp;
if(err)
@@ -113,10 +113,11 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
* saved pointer is in the kernel, but the sigcontext is in userspace, so we
* copy_to_user it.
*/
-int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
+int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext __user *from,
int fpsize)
{
- struct _fpstate *to_fp, *from_fp;
+ struct _fpstate *to_fp;
+ struct _fpstate __user *from_fp;
unsigned long sigs;
int err;
@@ -131,13 +132,14 @@ int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
return(err);
}
-int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp,
+int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate __user *fp,
struct sigcontext *from, int fpsize, unsigned long sp)
{
- struct _fpstate *to_fp, *from_fp;
+ struct _fpstate __user *to_fp;
+ struct _fpstate *from_fp;
int err;
- to_fp = (fp ? fp : (struct _fpstate *) (to + 1));
+ to_fp = (fp ? fp : (struct _fpstate __user *) (to + 1));
from_fp = from->fpstate;
err = copy_to_user(to, from, sizeof(*to));
@@ -165,7 +167,7 @@ static int copy_sc_from_user(struct pt_regs *to, void __user *from)
return(ret);
}
-static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,
+static int copy_sc_to_user(struct sigcontext *to, struct _fpstate __user *fp,
struct pt_regs *from, unsigned long sp)
{
return(CHOOSE_MODE(copy_sc_to_user_tt(to, fp, UPT_SC(&from->regs),
@@ -173,7 +175,7 @@ static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,
copy_sc_to_user_skas(to, fp, from, sp)));
}
-static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp,
+static int copy_ucontext_to_user(struct ucontext __user *uc, struct _fpstate __user *fp,
sigset_t *set, unsigned long sp)
{
int err = 0;
@@ -188,7 +190,7 @@ static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp,
struct sigframe
{
- char *pretcode;
+ char __user *pretcode;
int sig;
struct sigcontext sc;
struct _fpstate fpstate;
@@ -198,10 +200,10 @@ struct sigframe
struct rt_sigframe
{
- char *pretcode;
+ char __user *pretcode;
int sig;
- struct siginfo *pinfo;
- void *puc;
+ struct siginfo __user *pinfo;
+ void __user *puc;
struct siginfo info;
struct ucontext uc;
struct _fpstate fpstate;
@@ -213,16 +215,16 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig,
sigset_t *mask)
{
struct sigframe __user *frame;
- void *restorer;
+ void __user *restorer;
unsigned long save_sp = PT_REGS_SP(regs);
int err = 0;
stack_top &= -8UL;
- frame = (struct sigframe *) stack_top - 1;
+ frame = (struct sigframe __user *) stack_top - 1;
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return 1;
- restorer = (void *) frame->retcode;
+ restorer = frame->retcode;
if(ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
@@ -278,16 +280,16 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
siginfo_t *info, sigset_t *mask)
{
struct rt_sigframe __user *frame;
- void *restorer;
+ void __user *restorer;
unsigned long save_sp = PT_REGS_SP(regs);
int err = 0;
stack_top &= -8UL;
- frame = (struct rt_sigframe *) stack_top - 1;
+ frame = (struct rt_sigframe __user *) stack_top - 1;
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return 1;
- restorer = (void *) frame->retcode;
+ restorer = frame->retcode;
if(ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
@@ -333,7 +335,7 @@ err:
long sys_sigreturn(struct pt_regs regs)
{
unsigned long sp = PT_REGS_SP(&current->thread.regs);
- struct sigframe __user *frame = (struct sigframe *)(sp - 8);
+ struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
sigset_t set;
struct sigcontext __user *sc = &frame->sc;
unsigned long __user *oldmask = &sc->oldmask;
@@ -365,8 +367,8 @@ long sys_sigreturn(struct pt_regs regs)
long sys_rt_sigreturn(struct pt_regs regs)
{
- unsigned long __user sp = PT_REGS_SP(&current->thread.regs);
- struct rt_sigframe __user *frame = (struct rt_sigframe *) (sp - 4);
+ unsigned long sp = PT_REGS_SP(&current->thread.regs);
+ struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (sp - 4);
sigset_t set;
struct ucontext __user *uc = &frame->uc;
int sig_size = _NSIG_WORDS * sizeof(unsigned long);
diff --git a/arch/um/sys-i386/sys_call_table.S b/arch/um/sys-i386/sys_call_table.S
index ad75c27afe38a..1ff61474b25c6 100644
--- a/arch/um/sys-i386/sys_call_table.S
+++ b/arch/um/sys-i386/sys_call_table.S
@@ -6,8 +6,6 @@
#define sys_vm86old sys_ni_syscall
#define sys_vm86 sys_ni_syscall
-#define sys_set_thread_area sys_ni_syscall
-#define sys_get_thread_area sys_ni_syscall
#define sys_stime um_stime
#define sys_time um_time
diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
index 83e9be820a869..749dd1bfe60f9 100644
--- a/arch/um/sys-i386/syscalls.c
+++ b/arch/um/sys-i386/syscalls.c
@@ -61,21 +61,27 @@ long old_select(struct sel_arg_struct __user *arg)
return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
-/* The i386 version skips reading from %esi, the fourth argument. So we must do
- * this, too.
+/*
+ * The prototype on i386 is:
+ *
+ * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr)
+ *
+ * and the "newtls" arg. on i386 is read by copy_thread directly from the
+ * register saved on the stack.
*/
long sys_clone(unsigned long clone_flags, unsigned long newsp,
- int __user *parent_tid, int unused, int __user *child_tid)
+ int __user *parent_tid, void *newtls, int __user *child_tid)
{
long ret;
if (!newsp)
newsp = UPT_SP(&current->thread.regs.regs);
+
current->thread.forking = 1;
ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
child_tid);
current->thread.forking = 0;
- return(ret);
+ return ret;
}
/*
@@ -104,7 +110,7 @@ long sys_ipc (uint call, int first, int second,
union semun fourth;
if (!ptr)
return -EINVAL;
- if (get_user(fourth.__pad, (void **) ptr))
+ if (get_user(fourth.__pad, (void __user * __user *) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
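/* Hedged illustration of the i386 raw clone() argument order quoted in the
 * comment above (flags, child_stack, parent_tidptr, newtls, child_tidptr);
 * this helper is hypothetical and only meaningful on a 32-bit x86 host.
 */
#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>

struct user_desc;	/* defined in <asm/ldt.h> on the host */

static long raw_clone_i386(unsigned long flags, void *child_stack,
			   int *parent_tidptr, struct user_desc *newtls,
			   int *child_tidptr)
{
	return syscall(SYS_clone, flags, child_stack,
		       parent_tidptr, newtls, child_tidptr);
}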
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c
new file mode 100644
index 0000000000000..a3188e861cc7f
--- /dev/null
+++ b/arch/um/sys-i386/tls.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
+ * Licensed under the GPL
+ */
+
+#include "linux/config.h"
+#include "linux/kernel.h"
+#include "linux/sched.h"
+#include "linux/slab.h"
+#include "linux/types.h"
+#include "asm/uaccess.h"
+#include "asm/ptrace.h"
+#include "asm/segment.h"
+#include "asm/smp.h"
+#include "asm/desc.h"
+#include "choose-mode.h"
+#include "kern.h"
+#include "kern_util.h"
+#include "mode_kern.h"
+#include "os.h"
+#include "mode.h"
+
+#ifdef CONFIG_MODE_SKAS
+#include "skas.h"
+#endif
+
+/* If needed we can detect when it's uninitialized. */
+static int host_supports_tls = -1;
+int host_gdt_entry_tls_min = -1;
+
+#ifdef CONFIG_MODE_SKAS
+int do_set_thread_area_skas(struct user_desc *info)
+{
+ int ret;
+ u32 cpu;
+
+ cpu = get_cpu();
+ ret = os_set_thread_area(info, userspace_pid[cpu]);
+ put_cpu();
+ return ret;
+}
+
+int do_get_thread_area_skas(struct user_desc *info)
+{
+ int ret;
+ u32 cpu;
+
+ cpu = get_cpu();
+ ret = os_get_thread_area(info, userspace_pid[cpu]);
+ put_cpu();
+ return ret;
+}
+#endif
+
+/*
+ * get_free_idx: get a yet unused TLS descriptor index.
+ * XXX: Consider leaving one free slot for glibc usage in the first place. This
+ * must be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
+ *
+ * Also, this must be tested when compiling in SKAS mode with dynamic linking
+ * and running against NPTL.
+ */
+static int get_free_idx(struct task_struct* task)
+{
+ struct thread_struct *t = &task->thread;
+ int idx;
+
+ if (!t->arch.tls_array)
+ return GDT_ENTRY_TLS_MIN;
+
+ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+ if (!t->arch.tls_array[idx].present)
+ return idx + GDT_ENTRY_TLS_MIN;
+ return -ESRCH;
+}
+
+static inline void clear_user_desc(struct user_desc* info)
+{
+ /* Postcondition: LDT_empty(info) returns true. */
+ memset(info, 0, sizeof(*info));
+
+ /* See LDT_empty or the i386 sys_get_thread_area code - this indeed
+ * yields an empty user_desc.
+ */
+ info->read_exec_only = 1;
+ info->seg_not_present = 1;
+}
+
+#define O_FORCE 1
+
+static int load_TLS(int flags, struct task_struct *to)
+{
+ int ret = 0;
+ int idx;
+
+ for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
+ struct uml_tls_struct* curr = &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
+
+ /* If the entry is not present but has not been flushed yet, clear
+ * it here and flush the cleared descriptor to the host. */
+ if (!curr->present) {
+ if (!curr->flushed) {
+ clear_user_desc(&curr->tls);
+ curr->tls.entry_number = idx;
+ } else {
+ WARN_ON(!LDT_empty(&curr->tls));
+ continue;
+ }
+ }
+
+ if (!(flags & O_FORCE) && curr->flushed)
+ continue;
+
+ ret = do_set_thread_area(&curr->tls);
+ if (ret)
+ goto out;
+
+ curr->flushed = 1;
+ }
+out:
+ return ret;
+}
+
+/* Check whether the new process needs a TLS flush, i.e. whether any
+ * descriptor has not yet been flushed to the host.
+ */
+static inline int needs_TLS_update(struct task_struct *task)
+{
+ int i;
+ int ret = 0;
+
+ for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
+ struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
+
+ /* Can't test curr->present, we may need to clear a descriptor
+ * which had a value. */
+ if (curr->flushed)
+ continue;
+ ret = 1;
+ break;
+ }
+ return ret;
+}
+
+/* On a newly forked process, the TLS descriptors haven't yet been flushed. So
+ * we mark them as such and the first switch_to will do the job.
+ */
+void clear_flushed_tls(struct task_struct *task)
+{
+ int i;
+
+ for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
+ struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
+
+ /* This is still correct: if the entry wasn't present on the host it
+ * simply keeps whatever flushed state it had. */
+ if (!curr->present)
+ continue;
+
+ curr->flushed = 0;
+ }
+}
+
+/* In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
+ * common host process. So this is needed in SKAS0 too.
+ *
+ * However, if each thread had a different host process (and this was discussed
+ * for SMP support) this won't be needed.
+ *
+ * And this will not be needed when (and if) we add support for the host
+ * SKAS patch. */
+
+int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
+{
+ if (!host_supports_tls)
+ return 0;
+
+ /* There is no need to switch TLS for kernel threads; doing so would
+ * also mean calling os_set_thread_area with userspace_pid[cpu] == 0,
+ * which fails. */
+ if (likely(to->mm))
+ return load_TLS(O_FORCE, to);
+
+ return 0;
+}
+
+int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
+{
+ if (!host_supports_tls)
+ return 0;
+
+ if (needs_TLS_update(to))
+ return load_TLS(0, to);
+
+ return 0;
+}
+
+static int set_tls_entry(struct task_struct* task, struct user_desc *info,
+ int idx, int flushed)
+{
+ struct thread_struct *t = &task->thread;
+
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
+ t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
+ t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;
+
+ return 0;
+}
+
+int arch_copy_tls(struct task_struct *new)
+{
+ struct user_desc info;
+ int idx, ret = -EFAULT;
+
+ if (copy_from_user(&info,
+ (void __user *) UPT_ESI(&new->thread.regs.regs),
+ sizeof(info)))
+ goto out;
+
+ ret = -EINVAL;
+ if (LDT_empty(&info))
+ goto out;
+
+ idx = info.entry_number;
+
+ ret = set_tls_entry(new, &info, idx, 0);
+out:
+ return ret;
+}
+
+/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
+static int get_tls_entry(struct task_struct* task, struct user_desc *info, int idx)
+{
+ struct thread_struct *t = &task->thread;
+
+ if (!t->arch.tls_array)
+ goto clear;
+
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
+ goto clear;
+
+ *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;
+
+out:
+ /* Temporary debugging check, to make sure that things have been
+ * flushed. This could be triggered if load_TLS() failed.
+ */
+ if (unlikely(task == current && !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
+ printk(KERN_ERR "get_tls_entry: task with pid %d got here "
+ "without flushed TLS.\n", current->pid);
+ }
+
+ return 0;
+clear:
+ /* When the TLS entry has not been set, the values read back to userspace
+ * from the tls_array are 0 (because it's cleared at boot, see
+ * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
+ */
+ clear_user_desc(info);
+ info->entry_number = idx;
+ goto out;
+}
+
+asmlinkage int sys_set_thread_area(struct user_desc __user *user_desc)
+{
+ struct user_desc info;
+ int idx, ret;
+
+ if (!host_supports_tls)
+ return -ENOSYS;
+
+ if (copy_from_user(&info, user_desc, sizeof(info)))
+ return -EFAULT;
+
+ idx = info.entry_number;
+
+ if (idx == -1) {
+ idx = get_free_idx(current);
+ if (idx < 0)
+ return idx;
+ info.entry_number = idx;
+ /* Tell the user which slot we chose for him. */
+ if (put_user(idx, &user_desc->entry_number))
+ return -EFAULT;
+ }
+
+ ret = CHOOSE_MODE_PROC(do_set_thread_area_tt, do_set_thread_area_skas, &info);
+ if (ret)
+ return ret;
+ return set_tls_entry(current, &info, idx, 1);
+}
+
+/*
+ * Perform set_thread_area on behalf of the traced child.
+ * Note: error handling is not done on the deferred load, and this differs from
+ * i386. However, the only possible errors are caused by bugs.
+ */
+int ptrace_set_thread_area(struct task_struct *child, int idx,
+ struct user_desc __user *user_desc)
+{
+ struct user_desc info;
+
+ if (!host_supports_tls)
+ return -EIO;
+
+ if (copy_from_user(&info, user_desc, sizeof(info)))
+ return -EFAULT;
+
+ return set_tls_entry(child, &info, idx, 0);
+}
+
+asmlinkage int sys_get_thread_area(struct user_desc __user *user_desc)
+{
+ struct user_desc info;
+ int idx, ret;
+
+ if (!host_supports_tls)
+ return -ENOSYS;
+
+ if (get_user(idx, &user_desc->entry_number))
+ return -EFAULT;
+
+ ret = get_tls_entry(current, &info, idx);
+ if (ret < 0)
+ goto out;
+
+ if (copy_to_user(user_desc, &info, sizeof(info)))
+ ret = -EFAULT;
+
+out:
+ return ret;
+}
+
+/*
+ * Perform get_thread_area on behalf of the traced child.
+ */
+int ptrace_get_thread_area(struct task_struct *child, int idx,
+ struct user_desc __user *user_desc)
+{
+ struct user_desc info;
+ int ret;
+
+ if (!host_supports_tls)
+ return -EIO;
+
+ ret = get_tls_entry(child, &info, idx);
+ if (ret < 0)
+ goto out;
+
+ if (copy_to_user(user_desc, &info, sizeof(info)))
+ ret = -EFAULT;
+out:
+ return ret;
+}
+
+
+/* XXX: This part is probably common to i386 and x86-64. Don't create a common
+ * file for now, do that when implementing x86-64 support. */
+static int __init __setup_host_supports_tls(void) {
+ check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
+ if (host_supports_tls) {
+ printk(KERN_INFO "Host TLS support detected\n");
+ printk(KERN_INFO "Detected host type: ");
+ switch (host_gdt_entry_tls_min) {
+ case GDT_ENTRY_TLS_MIN_I386:
+ printk("i386\n");
+ break;
+ case GDT_ENTRY_TLS_MIN_X86_64:
+ printk("x86_64\n");
+ break;
+ }
+ } else
+ printk(KERN_ERR " Host TLS support NOT detected! "
+ "TLS support inside UML will not work\n");
+ return 1;
+}
+
+__initcall(__setup_host_supports_tls);
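The allocation path added in sys_set_thread_area() above mirrors the i386 behaviour: passing entry_number == -1 asks the kernel to pick a free GDT slot via get_free_idx() and to write the chosen index back to the caller. A minimal user-space sketch of that protocol, assuming an i386 toolchain where <asm/ldt.h> provides struct user_desc and <sys/syscall.h> provides SYS_set_thread_area (a hypothetical test program, not part of the patch):

#include <asm/ldt.h>            /* struct user_desc */
#include <sys/syscall.h>        /* SYS_set_thread_area */
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number   = -1;        /* ask the kernel to pick a free GDT slot */
	desc.base_addr      = 0x1000;    /* hypothetical TLS base, never dereferenced */
	desc.limit          = 0xfffff;
	desc.seg_32bit      = 1;
	desc.limit_in_pages = 1;
	desc.useable        = 1;

	if (syscall(SYS_set_thread_area, &desc) == 0)
		printf("kernel chose GDT slot %u\n", desc.entry_number);

	return 0;
}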
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index a351091fbd99c..b5fc22babddf2 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -4,31 +4,23 @@
# Licensed under the GPL
#
-#XXX: why into lib-y?
-lib-y = bitops.o bugs.o csum-partial.o delay.o fault.o ldt.o mem.o memcpy.o \
- ptrace.o ptrace_user.o sigcontext.o signal.o syscalls.o \
- syscall_table.o sysrq.o thunk.o
-lib-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o
+obj-y = bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
+ sigcontext.o signal.o syscalls.o syscall_table.o sysrq.o ksyms.o \
+ tls.o
-obj-y := ksyms.o
-obj-$(CONFIG_MODULES) += module.o um_module.o
+obj-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o
+obj-$(CONFIG_MODULES) += um_module.o
-USER_OBJS := ptrace_user.o sigcontext.o stub_segv.o
+subarch-obj-y = lib/bitops.o lib/csum-partial.o lib/memcpy.o lib/thunk.o
+subarch-obj-$(CONFIG_MODULES) += kernel/module.o
-SYMLINKS = bitops.c csum-copy.S csum-partial.c csum-wrappers.c ldt.c memcpy.S \
- thunk.S module.c
+ldt-y = ../sys-i386/ldt.o
-include arch/um/scripts/Makefile.rules
+USER_OBJS := ptrace_user.o sigcontext.o stub_segv.o
-bitops.c-dir = lib
-csum-copy.S-dir = lib
-csum-partial.c-dir = lib
-csum-wrappers.c-dir = lib
-ldt.c-dir = /arch/um/sys-i386
-memcpy.S-dir = lib
-thunk.S-dir = lib
-module.c-dir = kernel
+include arch/um/scripts/Makefile.rules
-$(obj)/stub_segv.o: _c_flags = $(call unprofile,$(CFLAGS))
+extra-$(CONFIG_MODE_TT) += unmap.o
-include arch/um/scripts/Makefile.unmap
+$(obj)/stub_segv.o $(obj)/unmap.o: \
+ _c_flags = $(call unprofile,$(CFLAGS))
diff --git a/arch/um/sys-x86_64/tls.c b/arch/um/sys-x86_64/tls.c
new file mode 100644
index 0000000000000..ce1bf1b81c431
--- /dev/null
+++ b/arch/um/sys-x86_64/tls.c
@@ -0,0 +1,14 @@
+#include "linux/sched.h"
+
+void debug_arch_force_load_TLS(void)
+{
+}
+
+void clear_flushed_tls(struct task_struct *task)
+{
+}
+
+int arch_copy_tls(struct task_struct *t)
+{
+ return 0;
+}
diff --git a/arch/x86_64/ia32/vsyscall-sigreturn.S b/arch/x86_64/ia32/vsyscall-sigreturn.S
index d90321fe9bba1..1384367cdbe1f 100644
--- a/arch/x86_64/ia32/vsyscall-sigreturn.S
+++ b/arch/x86_64/ia32/vsyscall-sigreturn.S
@@ -32,9 +32,28 @@ __kernel_rt_sigreturn:
.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
.section .eh_frame,"a",@progbits
+.LSTARTFRAMES:
+ .long .LENDCIES-.LSTARTCIES
+.LSTARTCIES:
+ .long 0 /* CIE ID */
+ .byte 1 /* Version number */
+ .string "zRS" /* NUL-terminated augmentation string */
+ .uleb128 1 /* Code alignment factor */
+ .sleb128 -4 /* Data alignment factor */
+ .byte 8 /* Return address register column */
+ .uleb128 1 /* Augmentation value length */
+ .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+ .byte 0x0c /* DW_CFA_def_cfa */
+ .uleb128 4
+ .uleb128 4
+ .byte 0x88 /* DW_CFA_offset, column 0x8 */
+ .uleb128 1
+ .align 4
+.LENDCIES:
+
.long .LENDFDE2-.LSTARTFDE2 /* Length FDE */
.LSTARTFDE2:
- .long .LSTARTFDE2-.LSTARTFRAME /* CIE pointer */
+ .long .LSTARTFDE2-.LSTARTFRAMES /* CIE pointer */
/* HACK: The dwarf2 unwind routines will subtract 1 from the
return address to get an address in the middle of the
presumed call instruction. Since we didn't get here via
@@ -97,7 +116,7 @@ __kernel_rt_sigreturn:
.long .LENDFDE3-.LSTARTFDE3 /* Length FDE */
.LSTARTFDE3:
- .long .LSTARTFDE3-.LSTARTFRAME /* CIE pointer */
+ .long .LSTARTFDE3-.LSTARTFRAMES /* CIE pointer */
/* HACK: See above wrt unwind library assumptions. */
.long .LSTART_rt_sigreturn-1-. /* PC-relative start address */
.long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1
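A worked reading of the CIE bytes added above, assuming the usual i386 DWARF register numbering (column 4 = %esp, column 8 = %eip); this is an annotation only, not additional assembler:

/*
 * code alignment = 1, data alignment = -4, return-address column = 8
 *
 *   DW_CFA_def_cfa 4, 4   =>  CFA  = %esp + 4
 *   DW_CFA_offset  8, 1   =>  %eip is saved at CFA + 1 * (-4), i.e. at (%esp)
 *
 * so the unwinder picks the return address up from the top of the stack.
 */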
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index d54620147e8e3..100a30c400449 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -615,7 +615,7 @@ static int __init apic_set_verbosity(char *str)
printk(KERN_WARNING "APIC Verbosity level %s not recognised"
" use apic=verbose or apic=debug", str);
- return 0;
+ return 1;
}
__setup("apic=", apic_set_verbosity);
@@ -1137,35 +1137,35 @@ int __init APIC_init_uniprocessor (void)
static __init int setup_disableapic(char *str)
{
disable_apic = 1;
- return 0;
+ return 1;
}
static __init int setup_nolapic(char *str)
{
disable_apic = 1;
- return 0;
+ return 1;
}
static __init int setup_noapictimer(char *str)
{
if (str[0] != ' ' && str[0] != 0)
- return -1;
+ return 0;
disable_apic_timer = 1;
- return 0;
+ return 1;
}
static __init int setup_apicmaintimer(char *str)
{
apic_runs_main_timer = 1;
nohpet = 1;
- return 0;
+ return 1;
}
__setup("apicmaintimer", setup_apicmaintimer);
static __init int setup_noapicmaintimer(char *str)
{
apic_runs_main_timer = -1;
- return 0;
+ return 1;
}
__setup("noapicmaintimer", setup_noapicmaintimer);
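The return-value changes in this file (and the similar ones in the files that follow) track the convention for old-style __setup() handlers: returning non-zero tells the early boot-option parser that the option was consumed, while returning 0 lets the string fall through to init as an unknown boot option (ending up as an argument or environment entry). A minimal sketch of the convention, using hypothetical names:

static int example_flag __initdata;

static int __init example_setup(char *str)
{
	example_flag = 1;
	return 1;	/* 1: handled here; 0: would be passed on to init
			 * as an unknown boot option */
}
__setup("example", example_setup);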
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index 13af920b65942..b93ef5b519806 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -221,7 +221,7 @@ int __init setup_early_printk(char *opt)
char buf[256];
if (early_console_initialized)
- return -1;
+ return 1;
strlcpy(buf,opt,sizeof(buf));
space = strchr(buf, ' ');
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 04282ef9fbd4b..10b3e348fc996 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -501,7 +501,7 @@ static struct miscdevice mce_log_device = {
static int __init mcheck_disable(char *str)
{
mce_dont_init = 1;
- return 0;
+ return 1;
}
/* mce=off disables machine check. Note you can reenable it later
@@ -521,7 +521,7 @@ static int __init mcheck_enable(char *str)
get_option(&str, &tolerant);
else
printk("mce= argument %s ignored. Please use /sys", str);
- return 0;
+ return 1;
}
__setup("nomce", mcheck_disable);
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index ee5ee4891f3d3..b0444a415bd60 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -121,7 +121,7 @@ unsigned int do_gettimeoffset_pm(void)
static int __init nopmtimer_setup(char *s)
{
pmtmr_ioport = 0;
- return 0;
+ return 1;
}
__setup("nopmtimer", nopmtimer_setup);
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index d1f3e9272c054..0856ad444f90d 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -540,7 +540,7 @@ void __init alternative_instructions(void)
static int __init noreplacement_setup(char *s)
{
no_replacement = 1;
- return 0;
+ return 1;
}
__setup("noreplacement", noreplacement_setup);
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index eabdb63fec310..8a691fa6d3938 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -55,7 +55,7 @@ int __init nonx_setup(char *str)
do_not_nx = 1;
__supported_pte_mask &= ~_PAGE_NX;
}
- return 0;
+ return 1;
}
__setup("noexec=", nonx_setup); /* parsed early actually */
@@ -74,7 +74,7 @@ static int __init nonx32_setup(char *str)
force_personality32 &= ~READ_IMPLIES_EXEC;
else if (!strcmp(str, "off"))
force_personality32 |= READ_IMPLIES_EXEC;
- return 0;
+ return 1;
}
__setup("noexec32=", nonx32_setup);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index ea48fa6380700..71a7222cf9ce1 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -353,7 +353,7 @@ static void __cpuinit tsc_sync_wait(void)
static __init int notscsync_setup(char *s)
{
notscsync = 1;
- return 0;
+ return 1;
}
__setup("notscsync", notscsync_setup);
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 473b514b66e43..ef8bc46dc1402 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -1306,7 +1306,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static int __init nohpet_setup(char *s)
{
nohpet = 1;
- return 0;
+ return 1;
}
__setup("nohpet", nohpet_setup);
@@ -1314,7 +1314,7 @@ __setup("nohpet", nohpet_setup);
int __init notsc_setup(char *s)
{
notsc = 1;
- return 0;
+ return 1;
}
__setup("notsc", notsc_setup);
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index edaa9fe654dce..6bda322d3cafe 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -973,14 +973,14 @@ void __init trap_init(void)
static int __init oops_dummy(char *s)
{
panic_on_oops = 1;
- return -1;
+ return 1;
}
__setup("oops=", oops_dummy);
static int __init kstack_setup(char *s)
{
kstack_depth_to_print = simple_strtoul(s,NULL,0);
- return 0;
+ return 1;
}
__setup("kstack=", kstack_setup);
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index d96a9348e5a24..d78f46056bda7 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -102,8 +102,6 @@ EXPORT_SYMBOL(cpu_callout_map);
EXPORT_SYMBOL(screen_info);
#endif
-EXPORT_SYMBOL(get_wchan);
-
EXPORT_SYMBOL(rtc_lock);
EXPORT_SYMBOL_GPL(set_nmi_callback);
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 316c53de47bd8..55250593d8c94 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -623,6 +623,6 @@ void vmalloc_sync_all(void)
static int __init enable_pagefaulttrace(char *str)
{
page_fault_trace = 1;
- return 0;
+ return 1;
}
__setup("pagefaulttrace", enable_pagefaulttrace);
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index efae56a514758..152b9370789b1 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -113,8 +113,6 @@ EXPORT_SYMBOL(__xtensa_copy_user);
// FIXME EXPORT_SYMBOL(screen_info);
#endif
-EXPORT_SYMBOL(get_wchan);
-
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
EXPORT_SYMBOL(outsl);
diff --git a/block/Kconfig b/block/Kconfig
index 5536839886ff2..b6f5f0a79655a 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -27,10 +27,10 @@ config BLK_DEV_IO_TRACE
config LSF
bool "Support for Large Single Files"
depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
- default n
help
- When CONFIG_LBD is disabled, say Y here if you want to
- handle large file(bigger than 2TB), otherwise say N.
- When CONFIG_LBD is enabled, Y is set automatically.
+ Say Y here if you want to be able to handle very large files (bigger
+ than 2TB), otherwise say N.
+
+ If unsure, say Y.
source block/Kconfig.iosched
diff --git a/block/elevator.c b/block/elevator.c
index 56c2ed06a9e24..0d6be03d929ea 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -145,7 +145,7 @@ static int __init elevator_setup(char *str)
strcpy(chosen_elevator, "anticipatory");
else
strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
- return 0;
+ return 1;
}
__setup("elevator=", elevator_setup);
diff --git a/block/genhd.c b/block/genhd.c
index db4c60c802d6d..5a8d3bf02f171 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -17,8 +17,6 @@
#include <linux/buffer_head.h>
#include <linux/mutex.h>
-#define MAX_PROBE_HASH 255 /* random */
-
static struct subsystem block_subsys;
static DEFINE_MUTEX(block_subsys_lock);
@@ -31,108 +29,29 @@ static struct blk_major_name {
struct blk_major_name *next;
int major;
char name[16];
-} *major_names[MAX_PROBE_HASH];
+} *major_names[BLKDEV_MAJOR_HASH_SIZE];
/* index in the above - for now: assume no multimajor ranges */
static inline int major_to_index(int major)
{
- return major % MAX_PROBE_HASH;
-}
-
-struct blkdev_info {
- int index;
- struct blk_major_name *bd;
-};
-
-/*
- * iterate over a list of blkdev_info structures. allows
- * the major_names array to be iterated over from outside this file
- * must be called with the block_subsys_lock held
- */
-void *get_next_blkdev(void *dev)
-{
- struct blkdev_info *info;
-
- if (dev == NULL) {
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto out;
- info->index=0;
- info->bd = major_names[info->index];
- if (info->bd)
- goto out;
- } else {
- info = dev;
- }
-
- while (info->index < ARRAY_SIZE(major_names)) {
- if (info->bd)
- info->bd = info->bd->next;
- if (info->bd)
- goto out;
- /*
- * No devices on this chain, move to the next
- */
- info->index++;
- info->bd = (info->index < ARRAY_SIZE(major_names)) ?
- major_names[info->index] : NULL;
- if (info->bd)
- goto out;
- }
-
-out:
- return info;
-}
-
-void *acquire_blkdev_list(void)
-{
- mutex_lock(&block_subsys_lock);
- return get_next_blkdev(NULL);
-}
-
-void release_blkdev_list(void *dev)
-{
- mutex_unlock(&block_subsys_lock);
- kfree(dev);
+ return major % BLKDEV_MAJOR_HASH_SIZE;
}
+#ifdef CONFIG_PROC_FS
-/*
- * Count the number of records in the blkdev_list.
- * must be called with the block_subsys_lock held
- */
-int count_blkdev_list(void)
+void blkdev_show(struct seq_file *f, off_t offset)
{
- struct blk_major_name *n;
- int i, count;
+ struct blk_major_name *dp;
- count = 0;
-
- for (i = 0; i < ARRAY_SIZE(major_names); i++) {
- for (n = major_names[i]; n; n = n->next)
- count++;
+ if (offset < BLKDEV_MAJOR_HASH_SIZE) {
+ mutex_lock(&block_subsys_lock);
+ for (dp = major_names[offset]; dp; dp = dp->next)
+ seq_printf(f, "%3d %s\n", dp->major, dp->name);
+ mutex_unlock(&block_subsys_lock);
}
-
- return count;
-}
-
-/*
- * extract the major and name values from a blkdev_info struct
- * passed in as a void to *dev. Must be called with
- * block_subsys_lock held
- */
-int get_blkdev_info(void *dev, int *major, char **name)
-{
- struct blkdev_info *info = dev;
-
- if (info->bd == NULL)
- return 1;
-
- *major = info->bd->major;
- *name = info->bd->name;
- return 0;
}
+#endif /* CONFIG_PROC_FS */
int register_blkdev(unsigned int major, const char *name)
{
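blkdev_show() above is written to be driven from a seq_file iterator (the /proc/devices code), printing one major-number hash bucket per step instead of exporting the old acquire/get/release walker. A schematic consumer, with a hypothetical show routine name standing in for the actual proc code:

/* Dump every registered block major in one hash bucket,
 * for 0 <= offset < BLKDEV_MAJOR_HASH_SIZE.
 */
static int example_devinfo_show(struct seq_file *f, void *v)
{
	loff_t offset = *(loff_t *) v;

	blkdev_show(f, offset);		/* takes block_subsys_lock internally */
	return 0;
}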
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 5b26af8597f3f..e112d1a5dab6b 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1740,7 +1740,7 @@ EXPORT_SYMBOL(blk_run_queue);
/**
* blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
- * @q: the request queue to be released
+ * @kobj: the kobj belonging to the request queue to be released
*
* Description:
* blk_cleanup_queue is the pair to blk_init_queue() or
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9f5c0da57c904..5c91d6afb1173 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -64,6 +64,8 @@ source "drivers/usb/Kconfig"
source "drivers/mmc/Kconfig"
+source "drivers/leds/Kconfig"
+
source "drivers/infiniband/Kconfig"
source "drivers/sn/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 424955274e60c..447d8e68887a1 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -25,9 +25,6 @@ obj-$(CONFIG_CONNECTOR) += connector/
obj-$(CONFIG_FB_I810) += video/i810/
obj-$(CONFIG_FB_INTEL) += video/intelfb/
-# we also need input/serio early so serio bus is initialized by the time
-# serial drivers start registering their serio ports
-obj-$(CONFIG_SERIO) += input/serio/
obj-y += serial/
obj-$(CONFIG_PARPORT) += parport/
obj-y += base/ block/ misc/ mfd/ net/ media/
@@ -53,6 +50,7 @@ obj-$(CONFIG_TC) += tc/
obj-$(CONFIG_USB) += usb/
obj-$(CONFIG_PCI) += usb/
obj-$(CONFIG_USB_GADGET) += usb/gadget/
+obj-$(CONFIG_SERIO) += input/serio/
obj-$(CONFIG_GAMEPORT) += input/gameport/
obj-$(CONFIG_INPUT) += input/
obj-$(CONFIG_I2O) += message/
@@ -69,7 +67,9 @@ obj-$(CONFIG_MCA) += mca/
obj-$(CONFIG_EISA) += eisa/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_MMC) += mmc/
+obj-$(CONFIG_NEW_LEDS) += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
+obj-$(CONFIG_IPATH_CORE) += infiniband/
obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 79b09d76c1802..eee0864ba300a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1572,7 +1572,7 @@ static void __exit acpi_ec_exit(void)
static int __init acpi_fake_ecdt_setup(char *str)
{
acpi_fake_ecdt_enabled = 1;
- return 0;
+ return 1;
}
__setup("acpi_fake_ecdt", acpi_fake_ecdt_setup);
@@ -1591,7 +1591,7 @@ static int __init acpi_ec_set_intr_mode(char *str)
acpi_ec_driver.ops.add = acpi_ec_poll_add;
}
printk(KERN_INFO PREFIX "EC %s mode.\n", intr ? "interrupt" : "polling");
- return 0;
+ return 1;
}
__setup("ec_intr=", acpi_ec_set_intr_mode);
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index b6e2909562140..2a8af685926fe 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1850,6 +1850,7 @@ static int __init amiga_floppy_setup (char *str)
return 0;
printk (KERN_INFO "amiflop: Setting default df0 to %x\n", n);
fd_def_df0 = n;
+ return 1;
}
__setup("floppy=", amiga_floppy_setup);
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 9888bc1517555..473a13b22b298 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -65,7 +65,7 @@ MODULE_LICENSE("GPL");
typedef struct bluecard_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct hci_dev *hdev;
@@ -85,8 +85,8 @@ typedef struct bluecard_info_t {
} bluecard_info_t;
-static void bluecard_config(dev_link_t *link);
-static void bluecard_release(dev_link_t *link);
+static int bluecard_config(struct pcmcia_device *link);
+static void bluecard_release(struct pcmcia_device *link);
static void bluecard_detach(struct pcmcia_device *p_dev);
@@ -162,7 +162,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev);
static void bluecard_activity_led_timeout(u_long arg)
{
bluecard_info_t *info = (bluecard_info_t *)arg;
- unsigned int iobase = info->link.io.BasePort1;
+ unsigned int iobase = info->p_dev->io.BasePort1;
if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
return;
@@ -179,7 +179,7 @@ static void bluecard_activity_led_timeout(u_long arg)
static void bluecard_enable_activity_led(bluecard_info_t *info)
{
- unsigned int iobase = info->link.io.BasePort1;
+ unsigned int iobase = info->p_dev->io.BasePort1;
if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
return;
@@ -235,7 +235,7 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
}
do {
- register unsigned int iobase = info->link.io.BasePort1;
+ register unsigned int iobase = info->p_dev->io.BasePort1;
register unsigned int offset;
register unsigned char command;
register unsigned long ready_bit;
@@ -244,7 +244,7 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
clear_bit(XMIT_WAKEUP, &(info->tx_state));
- if (!(info->link.state & DEV_PRESENT))
+ if (!pcmcia_dev_present(info->p_dev))
return;
if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) {
@@ -382,7 +382,7 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
return;
}
- iobase = info->link.io.BasePort1;
+ iobase = info->p_dev->io.BasePort1;
if (test_bit(XMIT_SENDING_READY, &(info->tx_state)))
bluecard_enable_activity_led(info);
@@ -512,7 +512,7 @@ static irqreturn_t bluecard_interrupt(int irq, void *dev_inst, struct pt_regs *r
if (!test_bit(CARD_READY, &(info->hw_state)))
return IRQ_HANDLED;
- iobase = info->link.io.BasePort1;
+ iobase = info->p_dev->io.BasePort1;
spin_lock(&(info->lock));
@@ -626,7 +626,7 @@ static int bluecard_hci_flush(struct hci_dev *hdev)
static int bluecard_hci_open(struct hci_dev *hdev)
{
bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
- unsigned int iobase = info->link.io.BasePort1;
+ unsigned int iobase = info->p_dev->io.BasePort1;
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
@@ -646,7 +646,7 @@ static int bluecard_hci_open(struct hci_dev *hdev)
static int bluecard_hci_close(struct hci_dev *hdev)
{
bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
- unsigned int iobase = info->link.io.BasePort1;
+ unsigned int iobase = info->p_dev->io.BasePort1;
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
@@ -713,7 +713,7 @@ static int bluecard_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned l
static int bluecard_open(bluecard_info_t *info)
{
- unsigned int iobase = info->link.io.BasePort1;
+ unsigned int iobase = info->p_dev->io.BasePort1;
struct hci_dev *hdev;
unsigned char id;
@@ -831,7 +831,7 @@ static int bluecard_open(bluecard_info_t *info)
static int bluecard_close(bluecard_info_t *info)
{
- unsigned int iobase = info->link.io.BasePort1;
+ unsigned int iobase = info->p_dev->io.BasePort1;
struct hci_dev *hdev = info->hdev;
if (!hdev)
@@ -856,17 +856,16 @@ static int bluecard_close(bluecard_info_t *info)
return 0;
}
-static int bluecard_attach(struct pcmcia_device *p_dev)
+static int bluecard_probe(struct pcmcia_device *link)
{
bluecard_info_t *info;
- dev_link_t *link;
/* Create new info device */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- link = &info->link;
+ info->p_dev = link;
link->priv = info;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -878,32 +877,22 @@ static int bluecard_attach(struct pcmcia_device *p_dev)
link->irq.Instance = info;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- bluecard_config(link);
-
- return 0;
+ return bluecard_config(link);
}
-static void bluecard_detach(struct pcmcia_device *p_dev)
+static void bluecard_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
bluecard_info_t *info = link->priv;
- if (link->state & DEV_CONFIG)
- bluecard_release(link);
-
+ bluecard_release(link);
kfree(info);
}
-static int first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
{
int i;
@@ -918,14 +907,12 @@ static int first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse
return pcmcia_parse_tuple(handle, tuple, parse);
}
-static void bluecard_config(dev_link_t *link)
+static int bluecard_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
bluecard_info_t *info = link->priv;
tuple_t tuple;
u_short buf[256];
cisparse_t parse;
- config_info_t config;
int i, n, last_ret, last_fn;
tuple.TupleData = (cisdata_t *)buf;
@@ -935,7 +922,7 @@ static void bluecard_config(dev_link_t *link)
/* Get configuration register information */
tuple.DesiredTuple = CISTPL_CONFIG;
- last_ret = first_tuple(handle, &tuple, &parse);
+ last_ret = first_tuple(link, &tuple, &parse);
if (last_ret != CS_SUCCESS) {
last_fn = ParseTuple;
goto cs_failed;
@@ -943,36 +930,31 @@ static void bluecard_config(dev_link_t *link)
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
- i = pcmcia_get_configuration_info(handle, &config);
- link->conf.Vcc = config.Vcc;
-
link->conf.ConfigIndex = 0x20;
link->io.NumPorts1 = 64;
link->io.IOAddrLines = 6;
for (n = 0; n < 0x400; n += 0x40) {
link->io.BasePort1 = n ^ 0x300;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS)
break;
}
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
goto failed;
}
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIRQ, i);
+ cs_error(link, RequestIRQ, i);
link->irq.AssignedIRQ = 0;
}
- i = pcmcia_request_configuration(link->handle, &link->conf);
+ i = pcmcia_request_configuration(link, &link->conf);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestConfiguration, i);
+ cs_error(link, RequestConfiguration, i);
goto failed;
}
@@ -980,57 +962,28 @@ static void bluecard_config(dev_link_t *link)
goto failed;
strcpy(info->node.dev_name, info->hdev->name);
- link->dev = &info->node;
- link->state &= ~DEV_CONFIG_PENDING;
+ link->dev_node = &info->node;
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
bluecard_release(link);
+ return -ENODEV;
}
-static void bluecard_release(dev_link_t *link)
+static void bluecard_release(struct pcmcia_device *link)
{
bluecard_info_t *info = link->priv;
- if (link->state & DEV_PRESENT)
- bluecard_close(info);
+ bluecard_close(info);
del_timer(&(info->timer));
- link->dev = NULL;
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
-}
-
-static int bluecard_suspend(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
-}
-
-static int bluecard_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state &= ~DEV_SUSPEND;
- if (DEV_OK(link))
- pcmcia_request_configuration(link->handle, &link->conf);
-
- return 0;
+ pcmcia_disable_device(link);
}
static struct pcmcia_device_id bluecard_ids[] = {
@@ -1046,11 +999,9 @@ static struct pcmcia_driver bluecard_driver = {
.drv = {
.name = "bluecard_cs",
},
- .probe = bluecard_attach,
+ .probe = bluecard_probe,
.remove = bluecard_detach,
.id_table = bluecard_ids,
- .suspend = bluecard_suspend,
- .resume = bluecard_resume,
};
static int __init init_bluecard_cs(void)
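The bluecard conversion above follows the same dev_link_t removal pattern that the bt3c, btuart and dtl1 diffs below repeat: the driver keeps the struct pcmcia_device pointer in its private data, probe() calls the config routine directly and propagates its status, and release() shrinks to a close plus pcmcia_disable_device(). A schematic of the converted shape, with hypothetical example_* names standing in for the per-driver functions:

typedef struct example_info_t {
	struct pcmcia_device *p_dev;
	dev_node_t node;
} example_info_t;

static int example_probe(struct pcmcia_device *link)
{
	example_info_t *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->p_dev = link;
	link->priv  = info;
	link->conf.Attributes = CONF_ENABLE_IRQ;
	link->conf.IntType    = INT_MEMORY_AND_IO;

	return example_config(link);	/* 0 on success, -ENODEV on failure */
}

static void example_release(struct pcmcia_device *link)
{
	example_info_t *info = link->priv;

	example_close(info);		/* driver-specific shutdown */
	pcmcia_disable_device(link);	/* replaces the old release/suspend calls */
}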
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 7e21b1ff27c42..b94ac2f9f7baf 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -72,7 +72,7 @@ MODULE_LICENSE("GPL");
typedef struct bt3c_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct hci_dev *hdev;
@@ -88,8 +88,8 @@ typedef struct bt3c_info_t {
} bt3c_info_t;
-static void bt3c_config(dev_link_t *link);
-static void bt3c_release(dev_link_t *link);
+static int bt3c_config(struct pcmcia_device *link);
+static void bt3c_release(struct pcmcia_device *link);
static void bt3c_detach(struct pcmcia_device *p_dev);
@@ -191,11 +191,11 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
return;
do {
- register unsigned int iobase = info->link.io.BasePort1;
+ register unsigned int iobase = info->p_dev->io.BasePort1;
register struct sk_buff *skb;
register int len;
- if (!(info->link.state & DEV_PRESENT))
+ if (!pcmcia_dev_present(info->p_dev))
break;
@@ -229,7 +229,7 @@ static void bt3c_receive(bt3c_info_t *info)
return;
}
- iobase = info->link.io.BasePort1;
+ iobase = info->p_dev->io.BasePort1;
avail = bt3c_read(iobase, 0x7006);
//printk("bt3c_cs: receiving %d bytes\n", avail);
@@ -350,7 +350,7 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst, struct pt_regs *regs)
return IRQ_NONE;
}
- iobase = info->link.io.BasePort1;
+ iobase = info->p_dev->io.BasePort1;
spin_lock(&(info->lock));
@@ -481,7 +481,7 @@ static int bt3c_load_firmware(bt3c_info_t *info, unsigned char *firmware, int co
unsigned int iobase, size, addr, fcs, tmp;
int i, err = 0;
- iobase = info->link.io.BasePort1;
+ iobase = info->p_dev->io.BasePort1;
/* Reset */
bt3c_io_write(iobase, 0x8040, 0x0404);
@@ -562,7 +562,6 @@ static int bt3c_open(bt3c_info_t *info)
{
const struct firmware *firmware;
struct hci_dev *hdev;
- client_handle_t handle;
int err;
spin_lock_init(&(info->lock));
@@ -594,10 +593,8 @@ static int bt3c_open(bt3c_info_t *info)
hdev->owner = THIS_MODULE;
- handle = info->link.handle;
-
/* Load firmware */
- err = request_firmware(&firmware, "BT3CPCC.bin", &handle_to_dev(handle));
+ err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev);
if (err < 0) {
BT_ERR("Firmware request failed");
goto error;
@@ -648,17 +645,16 @@ static int bt3c_close(bt3c_info_t *info)
return 0;
}
-static int bt3c_attach(struct pcmcia_device *p_dev)
+static int bt3c_probe(struct pcmcia_device *link)
{
bt3c_info_t *info;
- dev_link_t *link;
/* Create new info device */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- link = &info->link;
+ info->p_dev = link;
link->priv = info;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -670,31 +666,21 @@ static int bt3c_attach(struct pcmcia_device *p_dev)
link->irq.Instance = info;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- bt3c_config(link);
-
- return 0;
+ return bt3c_config(link);
}
-static void bt3c_detach(struct pcmcia_device *p_dev)
+static void bt3c_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
bt3c_info_t *info = link->priv;
- if (link->state & DEV_CONFIG)
- bt3c_release(link);
-
+ bt3c_release(link);
kfree(info);
}
-static int get_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
{
int i;
@@ -705,30 +691,28 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
return pcmcia_parse_tuple(handle, tuple, parse);
}
-static int first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
{
if (pcmcia_get_first_tuple(handle, tuple) != CS_SUCCESS)
return CS_NO_MORE_ITEMS;
return get_tuple(handle, tuple, parse);
}
-static int next_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
{
if (pcmcia_get_next_tuple(handle, tuple) != CS_SUCCESS)
return CS_NO_MORE_ITEMS;
return get_tuple(handle, tuple, parse);
}
-static void bt3c_config(dev_link_t *link)
+static int bt3c_config(struct pcmcia_device *link)
{
static kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
- client_handle_t handle = link->handle;
bt3c_info_t *info = link->priv;
tuple_t tuple;
u_short buf[256];
cisparse_t parse;
cistpl_cftable_entry_t *cf = &parse.cftable_entry;
- config_info_t config;
int i, j, try, last_ret, last_fn;
tuple.TupleData = (cisdata_t *)buf;
@@ -738,7 +722,7 @@ static void bt3c_config(dev_link_t *link)
/* Get configuration register information */
tuple.DesiredTuple = CISTPL_CONFIG;
- last_ret = first_tuple(handle, &tuple, &parse);
+ last_ret = first_tuple(link, &tuple, &parse);
if (last_ret != CS_SUCCESS) {
last_fn = ParseTuple;
goto cs_failed;
@@ -746,11 +730,6 @@ static void bt3c_config(dev_link_t *link)
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
- i = pcmcia_get_configuration_info(handle, &config);
- link->conf.Vcc = config.Vcc;
-
/* First pass: look for a config entry that looks normal. */
tuple.TupleData = (cisdata_t *)buf;
tuple.TupleOffset = 0;
@@ -759,59 +738,59 @@ static void bt3c_config(dev_link_t *link)
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
/* Two tries: without IO aliases, then with aliases */
for (try = 0; try < 2; try++) {
- i = first_tuple(handle, &tuple, &parse);
+ i = first_tuple(link, &tuple, &parse);
while (i != CS_NO_MORE_ITEMS) {
if (i != CS_SUCCESS)
goto next_entry;
if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000;
+ link->conf.Vpp = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000;
if ((cf->io.nwin > 0) && (cf->io.win[0].len == 8) && (cf->io.win[0].base != 0)) {
link->conf.ConfigIndex = cf->index;
link->io.BasePort1 = cf->io.win[0].base;
link->io.IOAddrLines = (try == 0) ? 16 : cf->io.flags & CISTPL_IO_LINES_MASK;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS)
goto found_port;
}
next_entry:
- i = next_tuple(handle, &tuple, &parse);
+ i = next_tuple(link, &tuple, &parse);
}
}
/* Second pass: try to find an entry that isn't picky about
its base address, then try to grab any standard serial port
address, and finally try to get any free port. */
- i = first_tuple(handle, &tuple, &parse);
+ i = first_tuple(link, &tuple, &parse);
while (i != CS_NO_MORE_ITEMS) {
if ((i == CS_SUCCESS) && (cf->io.nwin > 0) && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) {
link->conf.ConfigIndex = cf->index;
for (j = 0; j < 5; j++) {
link->io.BasePort1 = base[j];
link->io.IOAddrLines = base[j] ? 16 : 3;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS)
goto found_port;
}
}
- i = next_tuple(handle, &tuple, &parse);
+ i = next_tuple(link, &tuple, &parse);
}
found_port:
if (i != CS_SUCCESS) {
BT_ERR("No usable port range found");
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
goto failed;
}
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIRQ, i);
+ cs_error(link, RequestIRQ, i);
link->irq.AssignedIRQ = 0;
}
- i = pcmcia_request_configuration(link->handle, &link->conf);
+ i = pcmcia_request_configuration(link, &link->conf);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestConfiguration, i);
+ cs_error(link, RequestConfiguration, i);
goto failed;
}
@@ -819,55 +798,26 @@ found_port:
goto failed;
strcpy(info->node.dev_name, info->hdev->name);
- link->dev = &info->node;
- link->state &= ~DEV_CONFIG_PENDING;
+ link->dev_node = &info->node;
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
bt3c_release(link);
+ return -ENODEV;
}
-static void bt3c_release(dev_link_t *link)
+static void bt3c_release(struct pcmcia_device *link)
{
bt3c_info_t *info = link->priv;
- if (link->state & DEV_PRESENT)
- bt3c_close(info);
-
- link->dev = NULL;
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
-}
-
-static int bt3c_suspend(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
+ bt3c_close(info);
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
-}
-
-static int bt3c_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state &= ~DEV_SUSPEND;
- if (DEV_OK(link))
- pcmcia_request_configuration(link->handle, &link->conf);
-
- return 0;
+ pcmcia_disable_device(link);
}
@@ -882,11 +832,9 @@ static struct pcmcia_driver bt3c_driver = {
.drv = {
.name = "bt3c_cs",
},
- .probe = bt3c_attach,
+ .probe = bt3c_probe,
.remove = bt3c_detach,
.id_table = bt3c_ids,
- .suspend = bt3c_suspend,
- .resume = bt3c_resume,
};
static int __init init_bt3c_cs(void)
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 7b4bff4cfa2df..9ce4c93467e57 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -68,7 +68,7 @@ MODULE_LICENSE("GPL");
typedef struct btuart_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct hci_dev *hdev;
@@ -84,8 +84,8 @@ typedef struct btuart_info_t {
} btuart_info_t;
-static void btuart_config(dev_link_t *link);
-static void btuart_release(dev_link_t *link);
+static int btuart_config(struct pcmcia_device *link);
+static void btuart_release(struct pcmcia_device *link);
static void btuart_detach(struct pcmcia_device *p_dev);
@@ -146,13 +146,13 @@ static void btuart_write_wakeup(btuart_info_t *info)
}
do {
- register unsigned int iobase = info->link.io.BasePort1;
+ register unsigned int iobase = info->p_dev->io.BasePort1;
register struct sk_buff *skb;
register int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
- if (!(info->link.state & DEV_PRESENT))
+ if (!pcmcia_dev_present(info->p_dev))
return;
if (!(skb = skb_dequeue(&(info->txq))))
@@ -187,7 +187,7 @@ static void btuart_receive(btuart_info_t *info)
return;
}
- iobase = info->link.io.BasePort1;
+ iobase = info->p_dev->io.BasePort1;
do {
info->hdev->stat.byte_rx++;
@@ -301,7 +301,7 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst, struct pt_regs *reg
return IRQ_NONE;
}
- iobase = info->link.io.BasePort1;
+ iobase = info->p_dev->io.BasePort1;
spin_lock(&(info->lock));
@@ -357,7 +357,7 @@ static void btuart_change_speed(btuart_info_t *info, unsigned int speed)
return;
}
- iobase = info->link.io.BasePort1;
+ iobase = info->p_dev->io.BasePort1;
spin_lock_irqsave(&(info->lock), flags);
@@ -481,7 +481,7 @@ static int btuart_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned lon
static int btuart_open(btuart_info_t *info)
{
unsigned long flags;
- unsigned int iobase = info->link.io.BasePort1;
+ unsigned int iobase = info->p_dev->io.BasePort1;
struct hci_dev *hdev;
spin_lock_init(&(info->lock));
@@ -550,7 +550,7 @@ static int btuart_open(btuart_info_t *info)
static int btuart_close(btuart_info_t *info)
{
unsigned long flags;
- unsigned int iobase = info->link.io.BasePort1;
+ unsigned int iobase = info->p_dev->io.BasePort1;
struct hci_dev *hdev = info->hdev;
if (!hdev)
@@ -576,17 +576,16 @@ static int btuart_close(btuart_info_t *info)
return 0;
}
-static int btuart_attach(struct pcmcia_device *p_dev)
+static int btuart_probe(struct pcmcia_device *link)
{
btuart_info_t *info;
- dev_link_t *link;
/* Create new info device */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- link = &info->link;
+ info->p_dev = link;
link->priv = info;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -598,31 +597,21 @@ static int btuart_attach(struct pcmcia_device *p_dev)
link->irq.Instance = info;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- btuart_config(link);
-
- return 0;
+ return btuart_config(link);
}
-static void btuart_detach(struct pcmcia_device *p_dev)
+static void btuart_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
btuart_info_t *info = link->priv;
- if (link->state & DEV_CONFIG)
- btuart_release(link);
-
+ btuart_release(link);
kfree(info);
}
-static int get_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
{
int i;
@@ -633,30 +622,28 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
return pcmcia_parse_tuple(handle, tuple, parse);
}
-static int first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
{
if (pcmcia_get_first_tuple(handle, tuple) != CS_SUCCESS)
return CS_NO_MORE_ITEMS;
return get_tuple(handle, tuple, parse);
}
-static int next_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
{
if (pcmcia_get_next_tuple(handle, tuple) != CS_SUCCESS)
return CS_NO_MORE_ITEMS;
return get_tuple(handle, tuple, parse);
}
-static void btuart_config(dev_link_t *link)
+static int btuart_config(struct pcmcia_device *link)
{
static kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
- client_handle_t handle = link->handle;
btuart_info_t *info = link->priv;
tuple_t tuple;
u_short buf[256];
cisparse_t parse;
cistpl_cftable_entry_t *cf = &parse.cftable_entry;
- config_info_t config;
int i, j, try, last_ret, last_fn;
tuple.TupleData = (cisdata_t *)buf;
@@ -666,7 +653,7 @@ static void btuart_config(dev_link_t *link)
/* Get configuration register information */
tuple.DesiredTuple = CISTPL_CONFIG;
- last_ret = first_tuple(handle, &tuple, &parse);
+ last_ret = first_tuple(link, &tuple, &parse);
if (last_ret != CS_SUCCESS) {
last_fn = ParseTuple;
goto cs_failed;
@@ -674,11 +661,6 @@ static void btuart_config(dev_link_t *link)
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
- i = pcmcia_get_configuration_info(handle, &config);
- link->conf.Vcc = config.Vcc;
-
/* First pass: look for a config entry that looks normal. */
tuple.TupleData = (cisdata_t *) buf;
tuple.TupleOffset = 0;
@@ -687,29 +669,29 @@ static void btuart_config(dev_link_t *link)
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
/* Two tries: without IO aliases, then with aliases */
for (try = 0; try < 2; try++) {
- i = first_tuple(handle, &tuple, &parse);
+ i = first_tuple(link, &tuple, &parse);
while (i != CS_NO_MORE_ITEMS) {
if (i != CS_SUCCESS)
goto next_entry;
if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000;
+ link->conf.Vpp = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000;
if ((cf->io.nwin > 0) && (cf->io.win[0].len == 8) && (cf->io.win[0].base != 0)) {
link->conf.ConfigIndex = cf->index;
link->io.BasePort1 = cf->io.win[0].base;
link->io.IOAddrLines = (try == 0) ? 16 : cf->io.flags & CISTPL_IO_LINES_MASK;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS)
goto found_port;
}
next_entry:
- i = next_tuple(handle, &tuple, &parse);
+ i = next_tuple(link, &tuple, &parse);
}
}
/* Second pass: try to find an entry that isn't picky about
its base address, then try to grab any standard serial port
address, and finally try to get any free port. */
- i = first_tuple(handle, &tuple, &parse);
+ i = first_tuple(link, &tuple, &parse);
while (i != CS_NO_MORE_ITEMS) {
if ((i == CS_SUCCESS) && (cf->io.nwin > 0)
&& ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) {
@@ -717,30 +699,30 @@ next_entry:
for (j = 0; j < 5; j++) {
link->io.BasePort1 = base[j];
link->io.IOAddrLines = base[j] ? 16 : 3;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS)
goto found_port;
}
}
- i = next_tuple(handle, &tuple, &parse);
+ i = next_tuple(link, &tuple, &parse);
}
found_port:
if (i != CS_SUCCESS) {
BT_ERR("No usable port range found");
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
goto failed;
}
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIRQ, i);
+ cs_error(link, RequestIRQ, i);
link->irq.AssignedIRQ = 0;
}
- i = pcmcia_request_configuration(link->handle, &link->conf);
+ i = pcmcia_request_configuration(link, &link->conf);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestConfiguration, i);
+ cs_error(link, RequestConfiguration, i);
goto failed;
}
@@ -748,58 +730,28 @@ found_port:
goto failed;
strcpy(info->node.dev_name, info->hdev->name);
- link->dev = &info->node;
- link->state &= ~DEV_CONFIG_PENDING;
+ link->dev_node = &info->node;
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
btuart_release(link);
+ return -ENODEV;
}
-static void btuart_release(dev_link_t *link)
+static void btuart_release(struct pcmcia_device *link)
{
btuart_info_t *info = link->priv;
- if (link->state & DEV_PRESENT)
- btuart_close(info);
-
- link->dev = NULL;
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
-}
-
-static int btuart_suspend(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
+ btuart_close(info);
- return 0;
+ pcmcia_disable_device(link);
}
-static int btuart_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state &= ~DEV_SUSPEND;
- if (DEV_OK(link))
- pcmcia_request_configuration(link->handle, &link->conf);
-
- return 0;
-}
-
-
static struct pcmcia_device_id btuart_ids[] = {
/* don't use this driver. Use serial_cs + hci_uart instead */
PCMCIA_DEVICE_NULL
@@ -811,11 +763,9 @@ static struct pcmcia_driver btuart_driver = {
.drv = {
.name = "btuart_cs",
},
- .probe = btuart_attach,
+ .probe = btuart_probe,
.remove = btuart_detach,
.id_table = btuart_ids,
- .suspend = btuart_suspend,
- .resume = btuart_resume,
};
static int __init init_btuart_cs(void)
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 0449bc45ae5e5..a71a240611e0e 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -68,7 +68,7 @@ MODULE_LICENSE("GPL");
typedef struct dtl1_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct hci_dev *hdev;
@@ -87,8 +87,8 @@ typedef struct dtl1_info_t {
} dtl1_info_t;
-static void dtl1_config(dev_link_t *link);
-static void dtl1_release(dev_link_t *link);
+static int dtl1_config(struct pcmcia_device *link);
+static void dtl1_release(struct pcmcia_device *link);
static void dtl1_detach(struct pcmcia_device *p_dev);
@@ -153,13 +153,13 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
}
do {
- register unsigned int iobase = info->link.io.BasePort1;
+ register unsigned int iobase = info->p_dev->io.BasePort1;
register struct sk_buff *skb;
register int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
- if (!(info->link.state & DEV_PRESENT))
+ if (!pcmcia_dev_present(info->p_dev))
return;
if (!(skb = skb_dequeue(&(info->txq))))
@@ -218,7 +218,7 @@ static void dtl1_receive(dtl1_info_t *info)
return;
}
- iobase = info->link.io.BasePort1;
+ iobase = info->p_dev->io.BasePort1;
do {
info->hdev->stat.byte_rx++;
@@ -305,7 +305,7 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst, struct pt_regs *regs)
return IRQ_NONE;
}
- iobase = info->link.io.BasePort1;
+ iobase = info->p_dev->io.BasePort1;
spin_lock(&(info->lock));
@@ -458,7 +458,7 @@ static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long
static int dtl1_open(dtl1_info_t *info)
{
unsigned long flags;
- unsigned int iobase = info->link.io.BasePort1;
+ unsigned int iobase = info->p_dev->io.BasePort1;
struct hci_dev *hdev;
spin_lock_init(&(info->lock));
@@ -504,7 +504,7 @@ static int dtl1_open(dtl1_info_t *info)
outb(UART_LCR_WLEN8, iobase + UART_LCR); /* Reset DLAB */
outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR);
- info->ri_latch = inb(info->link.io.BasePort1 + UART_MSR) & UART_MSR_RI;
+ info->ri_latch = inb(info->p_dev->io.BasePort1 + UART_MSR) & UART_MSR_RI;
/* Turn on interrupts */
outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
@@ -529,7 +529,7 @@ static int dtl1_open(dtl1_info_t *info)
static int dtl1_close(dtl1_info_t *info)
{
unsigned long flags;
- unsigned int iobase = info->link.io.BasePort1;
+ unsigned int iobase = info->p_dev->io.BasePort1;
struct hci_dev *hdev = info->hdev;
if (!hdev)
@@ -555,17 +555,16 @@ static int dtl1_close(dtl1_info_t *info)
return 0;
}
-static int dtl1_attach(struct pcmcia_device *p_dev)
+static int dtl1_probe(struct pcmcia_device *link)
{
dtl1_info_t *info;
- dev_link_t *link;
/* Create new info device */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- link = &info->link;
+ info->p_dev = link;
link->priv = info;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -577,31 +576,22 @@ static int dtl1_attach(struct pcmcia_device *p_dev)
link->irq.Instance = info;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- dtl1_config(link);
-
- return 0;
+ return dtl1_config(link);
}
-static void dtl1_detach(struct pcmcia_device *p_dev)
+static void dtl1_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
dtl1_info_t *info = link->priv;
- if (link->state & DEV_CONFIG)
- dtl1_release(link);
+ dtl1_release(link);
kfree(info);
}
-static int get_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
{
int i;
@@ -612,29 +602,27 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
return pcmcia_parse_tuple(handle, tuple, parse);
}
-static int first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
{
if (pcmcia_get_first_tuple(handle, tuple) != CS_SUCCESS)
return CS_NO_MORE_ITEMS;
return get_tuple(handle, tuple, parse);
}
-static int next_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
{
if (pcmcia_get_next_tuple(handle, tuple) != CS_SUCCESS)
return CS_NO_MORE_ITEMS;
return get_tuple(handle, tuple, parse);
}
-static void dtl1_config(dev_link_t *link)
+static int dtl1_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
dtl1_info_t *info = link->priv;
tuple_t tuple;
u_short buf[256];
cisparse_t parse;
cistpl_cftable_entry_t *cf = &parse.cftable_entry;
- config_info_t config;
int i, last_ret, last_fn;
tuple.TupleData = (cisdata_t *)buf;
@@ -644,7 +632,7 @@ static void dtl1_config(dev_link_t *link)
/* Get configuration register information */
tuple.DesiredTuple = CISTPL_CONFIG;
- last_ret = first_tuple(handle, &tuple, &parse);
+ last_ret = first_tuple(link, &tuple, &parse);
if (last_ret != CS_SUCCESS) {
last_fn = ParseTuple;
goto cs_failed;
@@ -652,11 +640,6 @@ static void dtl1_config(dev_link_t *link)
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
- i = pcmcia_get_configuration_info(handle, &config);
- link->conf.Vcc = config.Vcc;
-
tuple.TupleData = (cisdata_t *)buf;
tuple.TupleOffset = 0;
tuple.TupleDataMax = 255;
@@ -665,34 +648,34 @@ static void dtl1_config(dev_link_t *link)
/* Look for a generic full-sized window */
link->io.NumPorts1 = 8;
- i = first_tuple(handle, &tuple, &parse);
+ i = first_tuple(link, &tuple, &parse);
while (i != CS_NO_MORE_ITEMS) {
if ((i == CS_SUCCESS) && (cf->io.nwin == 1) && (cf->io.win[0].len > 8)) {
link->conf.ConfigIndex = cf->index;
link->io.BasePort1 = cf->io.win[0].base;
link->io.NumPorts1 = cf->io.win[0].len; /*yo */
link->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS)
break;
}
- i = next_tuple(handle, &tuple, &parse);
+ i = next_tuple(link, &tuple, &parse);
}
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
goto failed;
}
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIRQ, i);
+ cs_error(link, RequestIRQ, i);
link->irq.AssignedIRQ = 0;
}
- i = pcmcia_request_configuration(link->handle, &link->conf);
+ i = pcmcia_request_configuration(link, &link->conf);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestConfiguration, i);
+ cs_error(link, RequestConfiguration, i);
goto failed;
}
@@ -700,55 +683,26 @@ static void dtl1_config(dev_link_t *link)
goto failed;
strcpy(info->node.dev_name, info->hdev->name);
- link->dev = &info->node;
- link->state &= ~DEV_CONFIG_PENDING;
+ link->dev_node = &info->node;
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
dtl1_release(link);
+ return -ENODEV;
}
-static void dtl1_release(dev_link_t *link)
+static void dtl1_release(struct pcmcia_device *link)
{
dtl1_info_t *info = link->priv;
- if (link->state & DEV_PRESENT)
- dtl1_close(info);
-
- link->dev = NULL;
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
-}
-
-static int dtl1_suspend(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
-}
-
-static int dtl1_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
+ dtl1_close(info);
- link->state &= ~DEV_SUSPEND;
- if (DEV_OK(link))
- pcmcia_request_configuration(link->handle, &link->conf);
-
- return 0;
+ pcmcia_disable_device(link);
}
@@ -765,11 +719,9 @@ static struct pcmcia_driver dtl1_driver = {
.drv = {
.name = "dtl1_cs",
},
- .probe = dtl1_attach,
+ .probe = dtl1_probe,
.remove = dtl1_detach,
.id_table = dtl1_ids,
- .suspend = dtl1_suspend,
- .resume = dtl1_resume,
};
static int __init init_dtl1_cs(void)
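The dtl1_cs hunks above show the conversion pattern repeated throughout this series: the embedded dev_link_t and its DEV_PRESENT/DEV_CONFIG flag juggling go away, the driver keeps a struct pcmcia_device pointer instead, probe and config report errors as return values, and teardown collapses into pcmcia_disable_device(). A minimal sketch of that shape, with illustrative names (my_info, my_probe and the other my_* identifiers are not part of the patch):

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <pcmcia/cs_types.h>
    #include <pcmcia/cs.h>
    #include <pcmcia/ds.h>

    struct my_info {
        struct pcmcia_device *p_dev;    /* back-pointer, replaces the embedded dev_link_t */
        dev_node_t node;
    };

    static int my_config(struct pcmcia_device *link);      /* per-driver setup, omitted here */

    static void my_release(struct pcmcia_device *link)
    {
        /* replaces pcmcia_release_configuration()/_io()/_irq() and DEV_CONFIG bookkeeping */
        pcmcia_disable_device(link);
    }

    static int my_probe(struct pcmcia_device *link)
    {
        struct my_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

        if (!info)
            return -ENOMEM;
        info->p_dev = link;
        link->priv = info;
        link->conf.IntType = INT_MEMORY_AND_IO;
        return my_config(link);         /* a config failure now propagates to the core */
    }

    static void my_detach(struct pcmcia_device *link)
    {
        my_release(link);               /* unconditional: no DEV_CONFIG flag left to test */
        kfree(link->priv);
    }

In the I/O paths the old "state & DEV_PRESENT" test becomes a call to pcmcia_dev_present(), as in dtl1_write_wakeup() above.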
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 327b00c3c45ea..8d97b3911293b 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -904,7 +904,7 @@ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
* It is possible the vty-server was removed after the irq was
* requested but before we have time to enable interrupts.
*/
- if (vio_enable_interrupts(vdev) == H_Success)
+ if (vio_enable_interrupts(vdev) == H_SUCCESS)
return 0;
else {
printk(KERN_ERR "HVCS: int enable failed for"
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 932feedda2622..e1c95374984cc 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -42,7 +42,7 @@
#include <linux/slab.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ipmi.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>
@@ -55,7 +55,7 @@ struct ipmi_file_private
struct file *file;
struct fasync_struct *fasync_queue;
wait_queue_head_t wait;
- struct semaphore recv_sem;
+ struct mutex recv_mutex;
int default_retries;
unsigned int default_retry_time_ms;
};
@@ -141,7 +141,7 @@ static int ipmi_open(struct inode *inode, struct file *file)
INIT_LIST_HEAD(&(priv->recv_msgs));
init_waitqueue_head(&priv->wait);
priv->fasync_queue = NULL;
- sema_init(&(priv->recv_sem), 1);
+ mutex_init(&priv->recv_mutex);
/* Use the low-level defaults. */
priv->default_retries = -1;
@@ -285,15 +285,15 @@ static int ipmi_ioctl(struct inode *inode,
break;
}
- /* We claim a semaphore because we don't want two
+ /* We claim a mutex because we don't want two
users getting something from the queue at a time.
Since we have to release the spinlock before we can
copy the data to the user, it's possible another
user will grab something from the queue, too. Then
the messages might get out of order if something
fails and the message gets put back onto the
- queue. This semaphore prevents that problem. */
- down(&(priv->recv_sem));
+ queue. This mutex prevents that problem. */
+ mutex_lock(&priv->recv_mutex);
/* Grab the message off the list. */
spin_lock_irqsave(&(priv->recv_msg_lock), flags);
@@ -352,7 +352,7 @@ static int ipmi_ioctl(struct inode *inode,
goto recv_putback_on_err;
}
- up(&(priv->recv_sem));
+ mutex_unlock(&priv->recv_mutex);
ipmi_free_recv_msg(msg);
break;
@@ -362,11 +362,11 @@ static int ipmi_ioctl(struct inode *inode,
spin_lock_irqsave(&(priv->recv_msg_lock), flags);
list_add(entry, &(priv->recv_msgs));
spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
- up(&(priv->recv_sem));
+ mutex_unlock(&priv->recv_mutex);
break;
recv_err:
- up(&(priv->recv_sem));
+ mutex_unlock(&priv->recv_mutex);
break;
}
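The ipmi_devintf.c change is the simplest case of the semaphore-to-mutex conversions in this series: a semaphore initialised to 1 and used only for mutual exclusion maps directly onto struct mutex. A condensed sketch of the mapping (my_private and my_consume are illustrative names):

    #include <linux/mutex.h>

    struct my_private {
        struct mutex recv_mutex;        /* was: struct semaphore recv_sem */
    };

    static void my_init(struct my_private *priv)
    {
        mutex_init(&priv->recv_mutex);  /* was: sema_init(&priv->recv_sem, 1) */
    }

    static void my_consume(struct my_private *priv)
    {
        mutex_lock(&priv->recv_mutex);          /* was: down(&priv->recv_sem) */
        /* dequeue one message and copy it out; only one reader at a time */
        mutex_unlock(&priv->recv_mutex);        /* was: up(&priv->recv_sem) */
    }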
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index da1554194d3db..2062675f9e998 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -227,7 +227,7 @@ static inline int check_ibf(struct si_sm_data *kcs, unsigned char status,
static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
long time)
{
- if (! GET_STATUS_OBF(status)) {
+ if (!GET_STATUS_OBF(status)) {
kcs->obf_timeout -= time;
if (kcs->obf_timeout < 0) {
start_error_recovery(kcs, "OBF not ready in time");
@@ -407,7 +407,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
}
if (state == KCS_READ_STATE) {
- if (! check_obf(kcs, status, time))
+ if (!check_obf(kcs, status, time))
return SI_SM_CALL_WITH_DELAY;
read_next_byte(kcs);
} else {
@@ -447,7 +447,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
"Not in read state for error2");
break;
}
- if (! check_obf(kcs, status, time))
+ if (!check_obf(kcs, status, time))
return SI_SM_CALL_WITH_DELAY;
clear_obf(kcs, status);
@@ -462,7 +462,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
break;
}
- if (! check_obf(kcs, status, time))
+ if (!check_obf(kcs, status, time))
return SI_SM_CALL_WITH_DELAY;
clear_obf(kcs, status);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 40eb005b9d77a..0ded046d5aa80 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -38,6 +38,7 @@
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
@@ -234,7 +235,7 @@ struct ipmi_smi
/* The list of command receivers that are registered for commands
on this interface. */
- struct semaphore cmd_rcvrs_lock;
+ struct mutex cmd_rcvrs_mutex;
struct list_head cmd_rcvrs;
/* Events that were queues because no one was there to receive
@@ -387,10 +388,10 @@ static void clean_up_interface_data(ipmi_smi_t intf)
/* Wholesale remove all the entries from the list in the
* interface and wait for RCU to know that none are in use. */
- down(&intf->cmd_rcvrs_lock);
+ mutex_lock(&intf->cmd_rcvrs_mutex);
list_add_rcu(&list, &intf->cmd_rcvrs);
list_del_rcu(&intf->cmd_rcvrs);
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
list_for_each_entry_safe(rcvr, rcvr2, &list, link)
@@ -557,7 +558,7 @@ unsigned int ipmi_addr_length(int addr_type)
static void deliver_response(struct ipmi_recv_msg *msg)
{
- if (! msg->user) {
+ if (!msg->user) {
ipmi_smi_t intf = msg->user_msg_data;
unsigned long flags;
@@ -598,11 +599,11 @@ static int intf_next_seq(ipmi_smi_t intf,
(i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
i = (i+1)%IPMI_IPMB_NUM_SEQ)
{
- if (! intf->seq_table[i].inuse)
+ if (!intf->seq_table[i].inuse)
break;
}
- if (! intf->seq_table[i].inuse) {
+ if (!intf->seq_table[i].inuse) {
intf->seq_table[i].recv_msg = recv_msg;
/* Start with the maximum timeout, when the send response
@@ -763,7 +764,7 @@ int ipmi_create_user(unsigned int if_num,
}
new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
- if (! new_user)
+ if (!new_user)
return -ENOMEM;
spin_lock_irqsave(&interfaces_lock, flags);
@@ -819,14 +820,13 @@ static void free_user(struct kref *ref)
int ipmi_destroy_user(ipmi_user_t user)
{
- int rv = -ENODEV;
ipmi_smi_t intf = user->intf;
int i;
unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- user->valid = 1;
+ user->valid = 0;
/* Remove the user from the interface's sequence table. */
spin_lock_irqsave(&intf->seq_lock, flags);
@@ -847,7 +847,7 @@ int ipmi_destroy_user(ipmi_user_t user)
* since other things may be using it till we do
* synchronize_rcu()) then free everything in that list.
*/
- down(&intf->cmd_rcvrs_lock);
+ mutex_lock(&intf->cmd_rcvrs_mutex);
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
if (rcvr->user == user) {
list_del_rcu(&rcvr->link);
@@ -855,7 +855,7 @@ int ipmi_destroy_user(ipmi_user_t user)
rcvrs = rcvr;
}
}
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
while (rcvrs) {
rcvr = rcvrs;
@@ -871,7 +871,7 @@ int ipmi_destroy_user(ipmi_user_t user)
kref_put(&user->refcount, free_user);
- return rv;
+ return 0;
}
void ipmi_get_version(ipmi_user_t user,
@@ -936,7 +936,8 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
if (val) {
/* Deliver any queued events. */
- list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) {
+ list_for_each_entry_safe(msg, msg2, &intf->waiting_events,
+ link) {
list_del(&msg->link);
list_add_tail(&msg->link, &msgs);
}
@@ -978,13 +979,13 @@ int ipmi_register_for_cmd(ipmi_user_t user,
rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
- if (! rcvr)
+ if (!rcvr)
return -ENOMEM;
rcvr->cmd = cmd;
rcvr->netfn = netfn;
rcvr->user = user;
- down(&intf->cmd_rcvrs_lock);
+ mutex_lock(&intf->cmd_rcvrs_mutex);
/* Make sure the command/netfn is not already registered. */
entry = find_cmd_rcvr(intf, netfn, cmd);
if (entry) {
@@ -995,7 +996,7 @@ int ipmi_register_for_cmd(ipmi_user_t user,
list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
out_unlock:
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
if (rv)
kfree(rcvr);
@@ -1009,17 +1010,17 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
ipmi_smi_t intf = user->intf;
struct cmd_rcvr *rcvr;
- down(&intf->cmd_rcvrs_lock);
+ mutex_lock(&intf->cmd_rcvrs_mutex);
/* Make sure the command/netfn is not already registered. */
rcvr = find_cmd_rcvr(intf, netfn, cmd);
if ((rcvr) && (rcvr->user == user)) {
list_del_rcu(&rcvr->link);
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
kfree(rcvr);
return 0;
} else {
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
return -ENOENT;
}
}
@@ -1514,7 +1515,7 @@ int ipmi_request_settime(ipmi_user_t user,
unsigned char saddr, lun;
int rv;
- if (! user)
+ if (!user)
return -EINVAL;
rv = check_addr(user->intf, addr, &saddr, &lun);
if (rv)
@@ -1545,7 +1546,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
unsigned char saddr, lun;
int rv;
- if (! user)
+ if (!user)
return -EINVAL;
rv = check_addr(user->intf, addr, &saddr, &lun);
if (rv)
@@ -1570,7 +1571,7 @@ static int ipmb_file_read_proc(char *page, char **start, off_t off,
char *out = (char *) page;
ipmi_smi_t intf = data;
int i;
- int rv= 0;
+ int rv = 0;
for (i = 0; i < IPMI_MAX_CHANNELS; i++)
rv += sprintf(out+rv, "%x ", intf->channels[i].address);
@@ -1989,7 +1990,7 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
} else {
bmc->dev = platform_device_alloc("ipmi_bmc",
bmc->id.device_id);
- if (! bmc->dev) {
+ if (!bmc->dev) {
printk(KERN_ERR
"ipmi_msghandler:"
" Unable to allocate platform device\n");
@@ -2305,8 +2306,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *si_dev,
- unsigned char slave_addr,
- ipmi_smi_t *new_intf)
+ unsigned char slave_addr)
{
int i, j;
int rv;
@@ -2366,7 +2366,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
spin_lock_init(&intf->events_lock);
INIT_LIST_HEAD(&intf->waiting_events);
intf->waiting_events_count = 0;
- init_MUTEX(&intf->cmd_rcvrs_lock);
+ mutex_init(&intf->cmd_rcvrs_mutex);
INIT_LIST_HEAD(&intf->cmd_rcvrs);
init_waitqueue_head(&intf->waitq);
@@ -2388,9 +2388,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
if (rv)
goto out;
- /* FIXME - this is an ugly kludge, this sets the intf for the
- caller before sending any messages with it. */
- *new_intf = intf;
+ rv = handlers->start_processing(send_info, intf);
+ if (rv)
+ goto out;
get_guid(intf);
@@ -2622,7 +2622,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
spin_unlock_irqrestore(&intf->counter_lock, flags);
recv_msg = ipmi_alloc_recv_msg();
- if (! recv_msg) {
+ if (!recv_msg) {
/* We couldn't allocate memory for the
message, so requeue it for handling
later. */
@@ -2777,7 +2777,7 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
spin_unlock_irqrestore(&intf->counter_lock, flags);
recv_msg = ipmi_alloc_recv_msg();
- if (! recv_msg) {
+ if (!recv_msg) {
/* We couldn't allocate memory for the
message, so requeue it for handling
later. */
@@ -2869,13 +2869,14 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
events. */
rcu_read_lock();
list_for_each_entry_rcu(user, &intf->users, link) {
- if (! user->gets_events)
+ if (!user->gets_events)
continue;
recv_msg = ipmi_alloc_recv_msg();
- if (! recv_msg) {
+ if (!recv_msg) {
rcu_read_unlock();
- list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
+ list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
+ link) {
list_del(&recv_msg->link);
ipmi_free_recv_msg(recv_msg);
}
@@ -2905,7 +2906,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
/* No one to receive the message, put it in queue if there's
not already too many things in the queue. */
recv_msg = ipmi_alloc_recv_msg();
- if (! recv_msg) {
+ if (!recv_msg) {
/* We couldn't allocate memory for the
message, so requeue it for handling
later. */
@@ -3190,7 +3191,7 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
rcu_read_lock();
list_for_each_entry_rcu(user, &intf->users, link) {
- if (! user->handler->ipmi_watchdog_pretimeout)
+ if (!user->handler->ipmi_watchdog_pretimeout)
continue;
user->handler->ipmi_watchdog_pretimeout(user->handler_data);
@@ -3278,7 +3279,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
ent->seqid);
- if (! smi_msg)
+ if (!smi_msg)
return;
spin_unlock_irqrestore(&intf->seq_lock, *flags);
@@ -3314,8 +3315,9 @@ static void ipmi_timeout_handler(long timeout_period)
/* See if any waiting messages need to be processed. */
spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- list_for_each_entry_safe(smi_msg, smi_msg2, &intf->waiting_msgs, link) {
- if (! handle_new_recv_msg(intf, smi_msg)) {
+ list_for_each_entry_safe(smi_msg, smi_msg2,
+ &intf->waiting_msgs, link) {
+ if (!handle_new_recv_msg(intf, smi_msg)) {
list_del(&smi_msg->link);
ipmi_free_smi_msg(smi_msg);
} else {
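In ipmi_msghandler.c the cmd_rcvrs list keeps its RCU-protected readers; the new mutex only serializes writers. The removal paths shown above therefore unlink under the mutex, wait for readers, then free. Roughly, under illustrative trimmed-down names (rcvr, rcvrs_mutex, unregister_rcvr):

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct rcvr {
        struct list_head link;
        unsigned char netfn, cmd;
    };

    static LIST_HEAD(rcvrs);
    static DEFINE_MUTEX(rcvrs_mutex);           /* writers only; readers use RCU */

    static void unregister_rcvr(struct rcvr *r)
    {
        mutex_lock(&rcvrs_mutex);
        list_del_rcu(&r->link);                 /* readers may still be walking the list */
        mutex_unlock(&rcvrs_mutex);
        synchronize_rcu();                      /* wait until no reader can still see r */
        kfree(r);
    }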
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 786a2802ca340..d0b5c08e7b4ea 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -346,7 +346,7 @@ static int ipmi_dell_chassis_detect (ipmi_user_t user)
{
const char ipmi_version_major = ipmi_version & 0xF;
const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
- const char mfr[3]=DELL_IANA_MFR_ID;
+ const char mfr[3] = DELL_IANA_MFR_ID;
if (!memcmp(mfr, &mfg_id, sizeof(mfr)) &&
ipmi_version_major <= 1 &&
ipmi_version_minor < 5)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 35fbd4d8ed4b5..a86c0f29953e7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -803,7 +803,7 @@ static int ipmi_thread(void *data)
set_user_nice(current, 19);
while (!kthread_should_stop()) {
spin_lock_irqsave(&(smi_info->si_lock), flags);
- smi_result=smi_event_handler(smi_info, 0);
+ smi_result = smi_event_handler(smi_info, 0);
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
/* do nothing */
@@ -972,10 +972,37 @@ static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
return si_irq_handler(irq, data, regs);
}
+static int smi_start_processing(void *send_info,
+ ipmi_smi_t intf)
+{
+ struct smi_info *new_smi = send_info;
+
+ new_smi->intf = intf;
+
+ /* Set up the timer that drives the interface. */
+ setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+ new_smi->last_timeout_jiffies = jiffies;
+ mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (new_smi->si_type != SI_BT) {
+ new_smi->thread = kthread_run(ipmi_thread, new_smi,
+ "kipmi%d", new_smi->intf_num);
+ if (IS_ERR(new_smi->thread)) {
+ printk(KERN_NOTICE "ipmi_si_intf: Could not start"
+ " kernel thread due to error %ld, only using"
+ " timers to drive the interface\n",
+ PTR_ERR(new_smi->thread));
+ new_smi->thread = NULL;
+ }
+ }
+
+ return 0;
+}
static struct ipmi_smi_handlers handlers =
{
.owner = THIS_MODULE,
+ .start_processing = smi_start_processing,
.sender = sender,
.request_events = request_events,
.set_run_to_completion = set_run_to_completion,
@@ -987,7 +1014,7 @@ static struct ipmi_smi_handlers handlers =
#define SI_MAX_PARMS 4
static LIST_HEAD(smi_infos);
-static DECLARE_MUTEX(smi_infos_lock);
+static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */
#define DEFAULT_REGSPACING 1
@@ -2162,9 +2189,13 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
{
- if (smi_info->thread != NULL && smi_info->thread != ERR_PTR(-ENOMEM))
- kthread_stop(smi_info->thread);
- del_timer_sync(&smi_info->si_timer);
+ if (smi_info->intf) {
+ /* The timer and thread are only running if the
+ interface has been started up and registered. */
+ if (smi_info->thread != NULL)
+ kthread_stop(smi_info->thread);
+ del_timer_sync(&smi_info->si_timer);
+ }
}
static struct ipmi_default_vals
@@ -2245,7 +2276,7 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->slave_addr, new_smi->irq);
}
- down(&smi_infos_lock);
+ mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
printk(KERN_WARNING "ipmi_si: duplicate interface\n");
rv = -EBUSY;
@@ -2341,21 +2372,6 @@ static int try_smi_init(struct smi_info *new_smi)
if (new_smi->irq)
new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
- /* The ipmi_register_smi() code does some operations to
- determine the channel information, so we must be ready to
- handle operations before it is called. This means we have
- to stop the timer if we get an error after this point. */
- init_timer(&(new_smi->si_timer));
- new_smi->si_timer.data = (long) new_smi;
- new_smi->si_timer.function = smi_timeout;
- new_smi->last_timeout_jiffies = jiffies;
- new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
-
- add_timer(&(new_smi->si_timer));
- if (new_smi->si_type != SI_BT)
- new_smi->thread = kthread_run(ipmi_thread, new_smi,
- "kipmi%d", new_smi->intf_num);
-
if (!new_smi->dev) {
/* If we don't already have a device from something
* else (like PCI), then register a new one. */
@@ -2365,7 +2381,7 @@ static int try_smi_init(struct smi_info *new_smi)
printk(KERN_ERR
"ipmi_si_intf:"
" Unable to allocate platform device\n");
- goto out_err_stop_timer;
+ goto out_err;
}
new_smi->dev = &new_smi->pdev->dev;
new_smi->dev->driver = &ipmi_driver;
@@ -2377,7 +2393,7 @@ static int try_smi_init(struct smi_info *new_smi)
" Unable to register system interface device:"
" %d\n",
rv);
- goto out_err_stop_timer;
+ goto out_err;
}
new_smi->dev_registered = 1;
}
@@ -2386,8 +2402,7 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi,
&new_smi->device_id,
new_smi->dev,
- new_smi->slave_addr,
- &(new_smi->intf));
+ new_smi->slave_addr);
if (rv) {
printk(KERN_ERR
"ipmi_si: Unable to register device: error %d\n",
@@ -2417,7 +2432,7 @@ static int try_smi_init(struct smi_info *new_smi)
list_add_tail(&new_smi->link, &smi_infos);
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
@@ -2454,7 +2469,7 @@ static int try_smi_init(struct smi_info *new_smi)
kfree(new_smi);
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
return rv;
}
@@ -2512,26 +2527,26 @@ static __devinit int init_ipmi_si(void)
#endif
if (si_trydefaults) {
- down(&smi_infos_lock);
+ mutex_lock(&smi_infos_lock);
if (list_empty(&smi_infos)) {
/* No BMC was found, try defaults. */
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
default_find_bmc();
} else {
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
}
}
- down(&smi_infos_lock);
+ mutex_lock(&smi_infos_lock);
if (list_empty(&smi_infos)) {
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
#ifdef CONFIG_PCI
pci_unregister_driver(&ipmi_pci_driver);
#endif
printk("ipmi_si: Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
return 0;
}
}
@@ -2607,10 +2622,10 @@ static __exit void cleanup_ipmi_si(void)
pci_unregister_driver(&ipmi_pci_driver);
#endif
- down(&smi_infos_lock);
+ mutex_lock(&smi_infos_lock);
list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
cleanup_one_si(e);
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
driver_unregister(&ipmi_driver);
}
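Two related points in the ipmi_si_intf.c hunks: the static list lock becomes a DEFINE_MUTEX instead of a DECLARE_MUTEX semaphore, and the timer plus kipmi thread start-up moves out of try_smi_init() into the new start_processing handler, so the interface only begins processing once ipmi_register_smi() has accepted it. A sketch of the callback contract, reduced to its essentials (my_smi and my_driver_start are illustrative; the signature follows smi_start_processing above):

    #include <linux/module.h>
    #include <linux/ipmi_smi.h>

    struct my_smi {
        ipmi_smi_t intf;        /* filled in only when the core hands it over */
    };

    static int my_driver_start(void *send_info, ipmi_smi_t intf)
    {
        struct my_smi *smi = send_info;

        smi->intf = intf;
        /* start timers and worker threads here, not before registration */
        return 0;
    }

    static struct ipmi_smi_handlers my_handlers = {
        .owner            = THIS_MODULE,
        .start_processing = my_driver_start,
        /* .sender, .request_events, ... unchanged */
    };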
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 7ece9f3c8f706..2d11ddd99e55e 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -39,6 +39,7 @@
#include <linux/watchdog.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
+#include <linux/completion.h>
#include <linux/rwsem.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
@@ -303,21 +304,22 @@ static int ipmi_heartbeat(void);
static void panic_halt_ipmi_heartbeat(void);
-/* We use a semaphore to make sure that only one thing can send a set
+/* We use a mutex to make sure that only one thing can send a set
timeout at one time, because we only have one copy of the data.
- The semaphore is claimed when the set_timeout is sent and freed
+ The mutex is claimed when the set_timeout is sent and freed
when both messages are free. */
static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
-static DECLARE_MUTEX(set_timeout_lock);
+static DEFINE_MUTEX(set_timeout_lock);
+static DECLARE_COMPLETION(set_timeout_wait);
static void set_timeout_free_smi(struct ipmi_smi_msg *msg)
{
if (atomic_dec_and_test(&set_timeout_tofree))
- up(&set_timeout_lock);
+ complete(&set_timeout_wait);
}
static void set_timeout_free_recv(struct ipmi_recv_msg *msg)
{
if (atomic_dec_and_test(&set_timeout_tofree))
- up(&set_timeout_lock);
+ complete(&set_timeout_wait);
}
static struct ipmi_smi_msg set_timeout_smi_msg =
{
@@ -399,7 +401,7 @@ static int ipmi_set_timeout(int do_heartbeat)
/* We can only send one of these at a time. */
- down(&set_timeout_lock);
+ mutex_lock(&set_timeout_lock);
atomic_set(&set_timeout_tofree, 2);
@@ -407,16 +409,21 @@ static int ipmi_set_timeout(int do_heartbeat)
&set_timeout_recv_msg,
&send_heartbeat_now);
if (rv) {
- up(&set_timeout_lock);
- } else {
- if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
- || ((send_heartbeat_now)
- && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
- {
- rv = ipmi_heartbeat();
- }
+ mutex_unlock(&set_timeout_lock);
+ goto out;
}
+ wait_for_completion(&set_timeout_wait);
+
+ if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
+ || ((send_heartbeat_now)
+ && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
+ {
+ rv = ipmi_heartbeat();
+ }
+ mutex_unlock(&set_timeout_lock);
+
+out:
return rv;
}
@@ -458,17 +465,17 @@ static void panic_halt_ipmi_set_timeout(void)
The semaphore is claimed when the set_timeout is sent and freed
when both messages are free. */
static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
-static DECLARE_MUTEX(heartbeat_lock);
-static DECLARE_MUTEX_LOCKED(heartbeat_wait_lock);
+static DEFINE_MUTEX(heartbeat_lock);
+static DECLARE_COMPLETION(heartbeat_wait);
static void heartbeat_free_smi(struct ipmi_smi_msg *msg)
{
if (atomic_dec_and_test(&heartbeat_tofree))
- up(&heartbeat_wait_lock);
+ complete(&heartbeat_wait);
}
static void heartbeat_free_recv(struct ipmi_recv_msg *msg)
{
if (atomic_dec_and_test(&heartbeat_tofree))
- up(&heartbeat_wait_lock);
+ complete(&heartbeat_wait);
}
static struct ipmi_smi_msg heartbeat_smi_msg =
{
@@ -511,14 +518,14 @@ static int ipmi_heartbeat(void)
return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
}
- down(&heartbeat_lock);
+ mutex_lock(&heartbeat_lock);
atomic_set(&heartbeat_tofree, 2);
/* Don't reset the timer if we have the timer turned off, that
re-enables the watchdog. */
if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
- up(&heartbeat_lock);
+ mutex_unlock(&heartbeat_lock);
return 0;
}
@@ -539,14 +546,14 @@ static int ipmi_heartbeat(void)
&heartbeat_recv_msg,
1);
if (rv) {
- up(&heartbeat_lock);
+ mutex_unlock(&heartbeat_lock);
printk(KERN_WARNING PFX "heartbeat failure: %d\n",
rv);
return rv;
}
/* Wait for the heartbeat to be sent. */
- down(&heartbeat_wait_lock);
+ wait_for_completion(&heartbeat_wait);
if (heartbeat_recv_msg.msg.data[0] != 0) {
/* Got an error in the heartbeat response. It was already
@@ -555,7 +562,7 @@ static int ipmi_heartbeat(void)
rv = -EINVAL;
}
- up(&heartbeat_lock);
+ mutex_unlock(&heartbeat_lock);
return rv;
}
@@ -589,7 +596,7 @@ static void panic_halt_ipmi_heartbeat(void)
1);
}
-static struct watchdog_info ident=
+static struct watchdog_info ident =
{
.options = 0, /* WDIOF_SETTIMEOUT, */
.firmware_version = 1,
@@ -790,13 +797,13 @@ static int ipmi_fasync(int fd, struct file *file, int on)
static int ipmi_close(struct inode *ino, struct file *filep)
{
- if (iminor(ino)==WATCHDOG_MINOR)
- {
+ if (iminor(ino) == WATCHDOG_MINOR) {
if (expect_close == 42) {
ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
} else {
- printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n");
+ printk(KERN_CRIT PFX
+ "Unexpected close, not stopping watchdog!\n");
ipmi_heartbeat();
}
clear_bit(0, &ipmi_wdog_open);
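The watchdog changes split the old DECLARE_MUTEX/DECLARE_MUTEX_LOCKED pairs into their two real jobs: a mutex for "only one operation in flight" and a completion for "wait until the message callbacks have fired". Stripped of the reference counting that atomic_dec_and_test() performs above, the pattern is roughly this (op_lock, op_done and my_do_op are illustrative names):

    #include <linux/mutex.h>
    #include <linux/completion.h>

    static DEFINE_MUTEX(op_lock);               /* one timeout/heartbeat at a time */
    static DECLARE_COMPLETION(op_done);         /* was: DECLARE_MUTEX_LOCKED(...) */

    static void my_msg_done(void)               /* message-free callback */
    {
        complete(&op_done);                     /* was: up(&wait_lock) */
    }

    static int my_do_op(void)
    {
        int rv = 0;

        mutex_lock(&op_lock);
        /* build and send the request; my_msg_done() fires once it is freed */
        wait_for_completion(&op_done);          /* was: down(&wait_lock) */
        mutex_unlock(&op_lock);
        return rv;
    }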
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index e5247f85a446e..ef20c1fc9c4c1 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -706,7 +706,6 @@ static int stli_portcmdstats(stliport_t *portp);
static int stli_clrportstats(stliport_t *portp, comstats_t __user *cp);
static int stli_getportstruct(stliport_t __user *arg);
static int stli_getbrdstruct(stlibrd_t __user *arg);
-static void *stli_memalloc(int len);
static stlibrd_t *stli_allocbrd(void);
static void stli_ecpinit(stlibrd_t *brdp);
@@ -997,17 +996,6 @@ static int stli_parsebrd(stlconf_t *confp, char **argp)
/*****************************************************************************/
-/*
- * Local driver kernel malloc routine.
- */
-
-static void *stli_memalloc(int len)
-{
- return((void *) kmalloc(len, GFP_KERNEL));
-}
-
-/*****************************************************************************/
-
static int stli_open(struct tty_struct *tty, struct file *filp)
{
stlibrd_t *brdp;
@@ -3227,13 +3215,12 @@ static int stli_initports(stlibrd_t *brdp)
#endif
for (i = 0, panelnr = 0, panelport = 0; (i < brdp->nrports); i++) {
- portp = (stliport_t *) stli_memalloc(sizeof(stliport_t));
- if (portp == (stliport_t *) NULL) {
+ portp = kzalloc(sizeof(stliport_t), GFP_KERNEL);
+ if (!portp) {
printk("STALLION: failed to allocate port structure\n");
continue;
}
- memset(portp, 0, sizeof(stliport_t));
portp->magic = STLI_PORTMAGIC;
portp->portnr = i;
portp->brdnr = brdp->brdnr;
@@ -4610,14 +4597,13 @@ static stlibrd_t *stli_allocbrd(void)
{
stlibrd_t *brdp;
- brdp = (stlibrd_t *) stli_memalloc(sizeof(stlibrd_t));
- if (brdp == (stlibrd_t *) NULL) {
+ brdp = kzalloc(sizeof(stlibrd_t), GFP_KERNEL);
+ if (!brdp) {
printk(KERN_ERR "STALLION: failed to allocate memory "
"(size=%d)\n", sizeof(stlibrd_t));
- return((stlibrd_t *) NULL);
+ return NULL;
}
- memset(brdp, 0, sizeof(stlibrd_t));
brdp->magic = STLI_BOARDMAGIC;
return(brdp);
}
@@ -5210,12 +5196,12 @@ int __init stli_init(void)
/*
* Allocate a temporary write buffer.
*/
- stli_tmpwritebuf = (char *) stli_memalloc(STLI_TXBUFSIZE);
- if (stli_tmpwritebuf == (char *) NULL)
+ stli_tmpwritebuf = kmalloc(STLI_TXBUFSIZE, GFP_KERNEL);
+ if (!stli_tmpwritebuf)
printk(KERN_ERR "STALLION: failed to allocate memory "
"(size=%d)\n", STLI_TXBUFSIZE);
- stli_txcookbuf = stli_memalloc(STLI_TXBUFSIZE);
- if (stli_txcookbuf == (char *) NULL)
+ stli_txcookbuf = kmalloc(STLI_TXBUFSIZE, GFP_KERNEL);
+ if (!stli_txcookbuf)
printk(KERN_ERR "STALLION: failed to allocate memory "
"(size=%d)\n", STLI_TXBUFSIZE);
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 8b603b2d1c421..935670a3cd987 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -74,7 +74,7 @@ void compute_shiftstate(void);
k_self, k_fn, k_spec, k_pad,\
k_dead, k_cons, k_cur, k_shift,\
k_meta, k_ascii, k_lock, k_lowercase,\
- k_slock, k_dead2, k_ignore, k_ignore
+ k_slock, k_dead2, k_brl, k_ignore
typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value,
char up_flag, struct pt_regs *regs);
@@ -100,7 +100,7 @@ static fn_handler_fn *fn_handler[] = { FN_HANDLERS };
const int max_vals[] = {
255, ARRAY_SIZE(func_table) - 1, ARRAY_SIZE(fn_handler) - 1, NR_PAD - 1,
NR_DEAD - 1, 255, 3, NR_SHIFT - 1, 255, NR_ASCII - 1, NR_LOCK - 1,
- 255, NR_LOCK - 1, 255
+ 255, NR_LOCK - 1, 255, NR_BRL - 1
};
const int NR_TYPES = ARRAY_SIZE(max_vals);
@@ -126,7 +126,7 @@ static unsigned long key_down[NBITS(KEY_MAX)]; /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
static int dead_key_next;
static int npadch = -1; /* -1 or number assembled on pad */
-static unsigned char diacr;
+static unsigned int diacr;
static char rep; /* flag telling character repeat */
static unsigned char ledstate = 0xff; /* undefined */
@@ -394,22 +394,30 @@ void compute_shiftstate(void)
* Otherwise, conclude that DIACR was not combining after all,
* queue it and return CH.
*/
-static unsigned char handle_diacr(struct vc_data *vc, unsigned char ch)
+static unsigned int handle_diacr(struct vc_data *vc, unsigned int ch)
{
- int d = diacr;
+ unsigned int d = diacr;
unsigned int i;
diacr = 0;
- for (i = 0; i < accent_table_size; i++) {
- if (accent_table[i].diacr == d && accent_table[i].base == ch)
- return accent_table[i].result;
+ if ((d & ~0xff) == BRL_UC_ROW) {
+ if ((ch & ~0xff) == BRL_UC_ROW)
+ return d | ch;
+ } else {
+ for (i = 0; i < accent_table_size; i++)
+ if (accent_table[i].diacr == d && accent_table[i].base == ch)
+ return accent_table[i].result;
}
- if (ch == ' ' || ch == d)
+ if (ch == ' ' || ch == (BRL_UC_ROW|0) || ch == d)
return d;
- put_queue(vc, d);
+ if (kbd->kbdmode == VC_UNICODE)
+ to_utf8(vc, d);
+ else if (d < 0x100)
+ put_queue(vc, d);
+
return ch;
}
@@ -419,7 +427,10 @@ static unsigned char handle_diacr(struct vc_data *vc, unsigned char ch)
static void fn_enter(struct vc_data *vc, struct pt_regs *regs)
{
if (diacr) {
- put_queue(vc, diacr);
+ if (kbd->kbdmode == VC_UNICODE)
+ to_utf8(vc, diacr);
+ else if (diacr < 0x100)
+ put_queue(vc, diacr);
diacr = 0;
}
put_queue(vc, 13);
@@ -615,7 +626,7 @@ static void k_lowercase(struct vc_data *vc, unsigned char value, char up_flag, s
printk(KERN_ERR "keyboard.c: k_lowercase was called - impossible\n");
}
-static void k_self(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
+static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag, struct pt_regs *regs)
{
if (up_flag)
return; /* no action, if this is a key release */
@@ -628,7 +639,10 @@ static void k_self(struct vc_data *vc, unsigned char value, char up_flag, struct
diacr = value;
return;
}
- put_queue(vc, value);
+ if (kbd->kbdmode == VC_UNICODE)
+ to_utf8(vc, value);
+ else if (value < 0x100)
+ put_queue(vc, value);
}
/*
@@ -636,13 +650,23 @@ static void k_self(struct vc_data *vc, unsigned char value, char up_flag, struct
* dead keys modifying the same character. Very useful
* for Vietnamese.
*/
-static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
+static void k_deadunicode(struct vc_data *vc, unsigned int value, char up_flag, struct pt_regs *regs)
{
if (up_flag)
return;
diacr = (diacr ? handle_diacr(vc, value) : value);
}
+static void k_self(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
+{
+ k_unicode(vc, value, up_flag, regs);
+}
+
+static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
+{
+ k_deadunicode(vc, value, up_flag, regs);
+}
+
/*
* Obsolete - for backwards compatibility only
*/
@@ -650,7 +674,7 @@ static void k_dead(struct vc_data *vc, unsigned char value, char up_flag, struct
{
static unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' };
value = ret_diacr[value];
- k_dead2(vc, value, up_flag, regs);
+ k_deadunicode(vc, value, up_flag, regs);
}
static void k_cons(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
@@ -835,6 +859,62 @@ static void k_slock(struct vc_data *vc, unsigned char value, char up_flag, struc
}
}
+/* by default, 300ms interval for combination release */
+static long brl_timeout = 300;
+MODULE_PARM_DESC(brl_timeout, "Braille keys release delay in ms (0 for combination on first release, < 0 for dead characters)");
+module_param(brl_timeout, long, 0644);
+static void k_brl(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
+{
+ static unsigned pressed,committing;
+ static unsigned long releasestart;
+
+ if (kbd->kbdmode != VC_UNICODE) {
+ if (!up_flag)
+ printk("keyboard mode must be unicode for braille patterns\n");
+ return;
+ }
+
+ if (!value) {
+ k_unicode(vc, BRL_UC_ROW, up_flag, regs);
+ return;
+ }
+
+ if (value > 8)
+ return;
+
+ if (brl_timeout < 0) {
+ k_deadunicode(vc, BRL_UC_ROW | (1 << (value - 1)), up_flag, regs);
+ return;
+ }
+
+ if (up_flag) {
+ if (brl_timeout) {
+ if (!committing ||
+ jiffies - releasestart > (brl_timeout * HZ) / 1000) {
+ committing = pressed;
+ releasestart = jiffies;
+ }
+ pressed &= ~(1 << (value - 1));
+ if (!pressed) {
+ if (committing) {
+ k_unicode(vc, BRL_UC_ROW | committing, 0, regs);
+ committing = 0;
+ }
+ }
+ } else {
+ if (committing) {
+ k_unicode(vc, BRL_UC_ROW | committing, 0, regs);
+ committing = 0;
+ }
+ pressed &= ~(1 << (value - 1));
+ }
+ } else {
+ pressed |= 1 << (value - 1);
+ if (!brl_timeout)
+ committing = pressed;
+ }
+}
+
/*
* The leds display either (i) the status of NumLock, CapsLock, ScrollLock,
* or (ii) whatever pattern of lights people want to show using KDSETLED,
@@ -1125,9 +1205,13 @@ static void kbd_keycode(unsigned int keycode, int down,
}
if (keycode > NR_KEYS)
- return;
+ if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
+ keysym = K(KT_BRL, keycode - KEY_BRL_DOT1 + 1);
+ else
+ return;
+ else
+ keysym = key_map[keycode];
- keysym = key_map[keycode];
type = KTYP(keysym);
if (type < 0xf0) {
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 5fdf185154330..02114a0bd0d9d 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -46,7 +46,7 @@
/* #define ATR_CSUM */
#ifdef PCMCIA_DEBUG
-#define reader_to_dev(x) (&handle_to_dev(x->link.handle))
+#define reader_to_dev(x) (&handle_to_dev(x->p_dev->handle))
static int pc_debug = PCMCIA_DEBUG;
module_param(pc_debug, int, 0600);
#define DEBUGP(n, rdr, x, args...) do { \
@@ -67,7 +67,7 @@ static char *version = "cm4000_cs.c v2.4.0gm6 - All bugs added by Harald Welte";
#define T_100MSEC msecs_to_jiffies(100)
#define T_500MSEC msecs_to_jiffies(500)
-static void cm4000_release(dev_link_t *link);
+static void cm4000_release(struct pcmcia_device *link);
static int major; /* major number we get from the kernel */
@@ -106,7 +106,7 @@ static int major; /* major number we get from the kernel */
#define REG_STOPBITS(x) (x + 7)
struct cm4000_dev {
- dev_link_t link; /* pcmcia link */
+ struct pcmcia_device *p_dev;
dev_node_t node; /* OS node (major,minor) */
unsigned char atr[MAX_ATR];
@@ -149,14 +149,14 @@ struct cm4000_dev {
#define ZERO_DEV(dev) \
memset(&dev->atr_csum,0, \
sizeof(struct cm4000_dev) - \
- /*link*/ sizeof(dev_link_t) - \
+ /*link*/ sizeof(struct pcmcia_device) - \
/*node*/ sizeof(dev_node_t) - \
/*atr*/ MAX_ATR*sizeof(char) - \
/*rbuf*/ 512*sizeof(char) - \
/*sbuf*/ 512*sizeof(char) - \
/*queue*/ 4*sizeof(wait_queue_head_t))
-static dev_link_t *dev_table[CM4000_MAX_DEV];
+static struct pcmcia_device *dev_table[CM4000_MAX_DEV];
static struct class *cmm_class;
/* This table doesn't use spaces after the comma between fields and thus
@@ -454,7 +454,7 @@ static struct card_fixup card_fixups[] = {
static void set_cardparameter(struct cm4000_dev *dev)
{
int i;
- ioaddr_t iobase = dev->link.io.BasePort1;
+ ioaddr_t iobase = dev->p_dev->io.BasePort1;
u_int8_t stopbits = 0x02; /* ISO default */
DEBUGP(3, dev, "-> set_cardparameter\n");
@@ -487,7 +487,7 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
unsigned short num_bytes_read;
unsigned char pts_reply[4];
ssize_t rc;
- ioaddr_t iobase = dev->link.io.BasePort1;
+ ioaddr_t iobase = dev->p_dev->io.BasePort1;
rc = 0;
@@ -699,7 +699,7 @@ static void terminate_monitor(struct cm4000_dev *dev)
static void monitor_card(unsigned long p)
{
struct cm4000_dev *dev = (struct cm4000_dev *) p;
- ioaddr_t iobase = dev->link.io.BasePort1;
+ ioaddr_t iobase = dev->p_dev->io.BasePort1;
unsigned short s;
struct ptsreq ptsreq;
int i, atrc;
@@ -962,7 +962,7 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
loff_t *ppos)
{
struct cm4000_dev *dev = filp->private_data;
- ioaddr_t iobase = dev->link.io.BasePort1;
+ ioaddr_t iobase = dev->p_dev->io.BasePort1;
ssize_t rc;
int i, j, k;
@@ -971,7 +971,7 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
if (count == 0) /* according to manpage */
return 0;
- if ((dev->link.state & DEV_PRESENT) == 0 || /* socket removed */
+ if (!pcmcia_dev_present(dev->p_dev) || /* device removed */
test_bit(IS_CMM_ABSENT, &dev->flags))
return -ENODEV;
@@ -1083,7 +1083,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
struct cm4000_dev *dev = (struct cm4000_dev *) filp->private_data;
- ioaddr_t iobase = dev->link.io.BasePort1;
+ ioaddr_t iobase = dev->p_dev->io.BasePort1;
unsigned short s;
unsigned char tmp;
unsigned char infolen;
@@ -1108,7 +1108,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
sendT0 = dev->proto ? 0 : nr > 5 ? 0x08 : 0;
- if ((dev->link.state & DEV_PRESENT) == 0 || /* socket removed */
+ if (!pcmcia_dev_present(dev->p_dev) || /* device removed */
test_bit(IS_CMM_ABSENT, &dev->flags))
return -ENODEV;
@@ -1440,8 +1440,8 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct cm4000_dev *dev = filp->private_data;
- ioaddr_t iobase = dev->link.io.BasePort1;
- dev_link_t *link;
+ ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ struct pcmcia_device *link;
int size;
int rc;
void __user *argp = (void __user *)arg;
@@ -1458,7 +1458,7 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
iminor(inode), ioctl_names[_IOC_NR(cmd)]);
link = dev_table[iminor(inode)];
- if (!(DEV_OK(link))) {
+ if (!pcmcia_dev_present(link)) {
DEBUGP(4, dev, "DEV_OK false\n");
return -ENODEV;
}
@@ -1660,14 +1660,14 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
static int cmm_open(struct inode *inode, struct file *filp)
{
struct cm4000_dev *dev;
- dev_link_t *link;
+ struct pcmcia_device *link;
int rc, minor = iminor(inode);
if (minor >= CM4000_MAX_DEV)
return -ENODEV;
link = dev_table[minor];
- if (link == NULL || !(DEV_OK(link)))
+ if (link == NULL || !pcmcia_dev_present(link))
return -ENODEV;
if (link->open)
@@ -1709,7 +1709,7 @@ static int cmm_open(struct inode *inode, struct file *filp)
static int cmm_close(struct inode *inode, struct file *filp)
{
struct cm4000_dev *dev;
- dev_link_t *link;
+ struct pcmcia_device *link;
int minor = iminor(inode);
if (minor >= CM4000_MAX_DEV)
@@ -1735,7 +1735,7 @@ static int cmm_close(struct inode *inode, struct file *filp)
return 0;
}
-static void cmm_cm4000_release(dev_link_t * link)
+static void cmm_cm4000_release(struct pcmcia_device * link)
{
struct cm4000_dev *dev = link->priv;
@@ -1759,13 +1759,11 @@ static void cmm_cm4000_release(dev_link_t * link)
/*==== Interface to PCMCIA Layer =======================================*/
-static void cm4000_config(dev_link_t * link, int devno)
+static int cm4000_config(struct pcmcia_device * link, int devno)
{
- client_handle_t handle = link->handle;
struct cm4000_dev *dev;
tuple_t tuple;
cisparse_t parse;
- config_info_t conf;
u_char buf[64];
int fail_fn, fail_rc;
int rc;
@@ -1777,41 +1775,34 @@ static void cm4000_config(dev_link_t * link, int devno)
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
- if ((fail_rc = pcmcia_get_first_tuple(handle, &tuple)) != CS_SUCCESS) {
+ if ((fail_rc = pcmcia_get_first_tuple(link, &tuple)) != CS_SUCCESS) {
fail_fn = GetFirstTuple;
goto cs_failed;
}
- if ((fail_rc = pcmcia_get_tuple_data(handle, &tuple)) != CS_SUCCESS) {
+ if ((fail_rc = pcmcia_get_tuple_data(link, &tuple)) != CS_SUCCESS) {
fail_fn = GetTupleData;
goto cs_failed;
}
if ((fail_rc =
- pcmcia_parse_tuple(handle, &tuple, &parse)) != CS_SUCCESS) {
+ pcmcia_parse_tuple(link, &tuple, &parse)) != CS_SUCCESS) {
fail_fn = ParseTuple;
goto cs_failed;
}
- if ((fail_rc =
- pcmcia_get_configuration_info(handle, &conf)) != CS_SUCCESS) {
- fail_fn = GetConfigurationInfo;
- goto cs_failed;
- }
- link->state |= DEV_CONFIG;
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- link->conf.Vcc = conf.Vcc;
link->io.BasePort2 = 0;
link->io.NumPorts2 = 0;
link->io.Attributes2 = 0;
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- for (rc = pcmcia_get_first_tuple(handle, &tuple);
- rc == CS_SUCCESS; rc = pcmcia_get_next_tuple(handle, &tuple)) {
+ for (rc = pcmcia_get_first_tuple(link, &tuple);
+ rc == CS_SUCCESS; rc = pcmcia_get_next_tuple(link, &tuple)) {
- rc = pcmcia_get_tuple_data(handle, &tuple);
+ rc = pcmcia_get_tuple_data(link, &tuple);
if (rc != CS_SUCCESS)
continue;
- rc = pcmcia_parse_tuple(handle, &tuple, &parse);
+ rc = pcmcia_parse_tuple(link, &tuple, &parse);
if (rc != CS_SUCCESS)
continue;
@@ -1831,7 +1822,7 @@ static void cm4000_config(dev_link_t * link, int devno)
link->io.IOAddrLines = parse.cftable_entry.io.flags
& CISTPL_IO_LINES_MASK;
- rc = pcmcia_request_io(handle, &link->io);
+ rc = pcmcia_request_io(link, &link->io);
if (rc == CS_SUCCESS)
break; /* we are done */
}
@@ -1841,7 +1832,7 @@ static void cm4000_config(dev_link_t * link, int devno)
link->conf.IntType = 00000002;
if ((fail_rc =
- pcmcia_request_configuration(handle, &link->conf)) != CS_SUCCESS) {
+ pcmcia_request_configuration(link, &link->conf)) != CS_SUCCESS) {
fail_fn = RequestConfiguration;
goto cs_release;
}
@@ -1851,63 +1842,48 @@ static void cm4000_config(dev_link_t * link, int devno)
dev->node.major = major;
dev->node.minor = devno;
dev->node.next = NULL;
- link->dev = &dev->node;
- link->state &= ~DEV_CONFIG_PENDING;
+ link->dev_node = &dev->node;
- return;
+ return 0;
cs_failed:
- cs_error(handle, fail_fn, fail_rc);
+ cs_error(link, fail_fn, fail_rc);
cs_release:
cm4000_release(link);
-
- link->state &= ~DEV_CONFIG_PENDING;
+ return -ENODEV;
}
-static int cm4000_suspend(struct pcmcia_device *p_dev)
+static int cm4000_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct cm4000_dev *dev;
dev = link->priv;
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
stop_monitor(dev);
return 0;
}
-static int cm4000_resume(struct pcmcia_device *p_dev)
+static int cm4000_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct cm4000_dev *dev;
dev = link->priv;
-
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_request_configuration(link->handle, &link->conf);
-
if (link->open)
start_monitor(dev);
return 0;
}
-static void cm4000_release(dev_link_t *link)
+static void cm4000_release(struct pcmcia_device *link)
{
cmm_cm4000_release(link->priv); /* delay release until device closed */
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
+ pcmcia_disable_device(link);
}
-static int cm4000_attach(struct pcmcia_device *p_dev)
+static int cm4000_probe(struct pcmcia_device *link)
{
struct cm4000_dev *dev;
- dev_link_t *link;
- int i;
+ int i, ret;
for (i = 0; i < CM4000_MAX_DEV; i++)
if (dev_table[i] == NULL)
@@ -1923,7 +1899,7 @@ static int cm4000_attach(struct pcmcia_device *p_dev)
if (dev == NULL)
return -ENOMEM;
- link = &dev->link;
+ dev->p_dev = link;
link->priv = dev;
link->conf.IntType = INT_MEMORY_AND_IO;
dev_table[i] = link;
@@ -1933,11 +1909,9 @@ static int cm4000_attach(struct pcmcia_device *p_dev)
init_waitqueue_head(&dev->atrq);
init_waitqueue_head(&dev->readq);
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- cm4000_config(link, i);
+ ret = cm4000_config(link, i);
+ if (ret)
+ return ret;
class_device_create(cmm_class, NULL, MKDEV(major, i), NULL,
"cmm%d", i);
@@ -1945,9 +1919,8 @@ static int cm4000_attach(struct pcmcia_device *p_dev)
return 0;
}
-static void cm4000_detach(struct pcmcia_device *p_dev)
+static void cm4000_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct cm4000_dev *dev = link->priv;
int devno;
@@ -1958,11 +1931,9 @@ static void cm4000_detach(struct pcmcia_device *p_dev)
if (devno == CM4000_MAX_DEV)
return;
- link->state &= ~DEV_PRESENT;
stop_monitor(dev);
- if (link->state & DEV_CONFIG)
- cm4000_release(link);
+ cm4000_release(link);
dev_table[devno] = NULL;
kfree(dev);
@@ -1993,7 +1964,7 @@ static struct pcmcia_driver cm4000_driver = {
.drv = {
.name = "cm4000_cs",
},
- .probe = cm4000_attach,
+ .probe = cm4000_probe,
.remove = cm4000_detach,
.suspend = cm4000_suspend,
.resume = cm4000_resume,
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 466e33bab029e..29efa64580a89 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -41,7 +41,7 @@
#ifdef PCMCIA_DEBUG
-#define reader_to_dev(x) (&handle_to_dev(x->link.handle))
+#define reader_to_dev(x) (&handle_to_dev(x->p_dev->handle))
static int pc_debug = PCMCIA_DEBUG;
module_param(pc_debug, int, 0600);
#define DEBUGP(n, rdr, x, args...) do { \
@@ -65,7 +65,7 @@ static char *version =
/* how often to poll for fifo status change */
#define POLL_PERIOD msecs_to_jiffies(10)
-static void reader_release(dev_link_t *link);
+static void reader_release(struct pcmcia_device *link);
static int major;
static struct class *cmx_class;
@@ -74,7 +74,7 @@ static struct class *cmx_class;
#define BS_WRITABLE 0x02
struct reader_dev {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
wait_queue_head_t devq;
wait_queue_head_t poll_wait;
@@ -87,7 +87,7 @@ struct reader_dev {
struct timer_list poll_timer;
};
-static dev_link_t *dev_table[CM_MAX_DEV];
+static struct pcmcia_device *dev_table[CM_MAX_DEV];
#ifndef PCMCIA_DEBUG
#define xoutb outb
@@ -116,7 +116,7 @@ static inline unsigned char xinb(unsigned short port)
static void cm4040_do_poll(unsigned long dummy)
{
struct reader_dev *dev = (struct reader_dev *) dummy;
- unsigned int obs = xinb(dev->link.io.BasePort1
+ unsigned int obs = xinb(dev->p_dev->io.BasePort1
+ REG_OFFSET_BUFFER_STATUS);
if ((obs & BSR_BULK_IN_FULL)) {
@@ -147,7 +147,7 @@ static void cm4040_stop_poll(struct reader_dev *dev)
static int wait_for_bulk_out_ready(struct reader_dev *dev)
{
int i, rc;
- int iobase = dev->link.io.BasePort1;
+ int iobase = dev->p_dev->io.BasePort1;
for (i = 0; i < POLL_LOOP_COUNT; i++) {
if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS)
@@ -177,7 +177,7 @@ static int wait_for_bulk_out_ready(struct reader_dev *dev)
/* Write to Sync Control Register */
static int write_sync_reg(unsigned char val, struct reader_dev *dev)
{
- int iobase = dev->link.io.BasePort1;
+ int iobase = dev->p_dev->io.BasePort1;
int rc;
rc = wait_for_bulk_out_ready(dev);
@@ -195,7 +195,7 @@ static int write_sync_reg(unsigned char val, struct reader_dev *dev)
static int wait_for_bulk_in_ready(struct reader_dev *dev)
{
int i, rc;
- int iobase = dev->link.io.BasePort1;
+ int iobase = dev->p_dev->io.BasePort1;
for (i = 0; i < POLL_LOOP_COUNT; i++) {
if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS)
@@ -225,7 +225,7 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
struct reader_dev *dev = filp->private_data;
- int iobase = dev->link.io.BasePort1;
+ int iobase = dev->p_dev->io.BasePort1;
size_t bytes_to_read;
unsigned long i;
size_t min_bytes_to_read;
@@ -246,7 +246,7 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
return -EAGAIN;
}
- if ((dev->link.state & DEV_PRESENT)==0)
+ if (!pcmcia_dev_present(dev->p_dev))
return -ENODEV;
for (i = 0; i < 5; i++) {
@@ -328,7 +328,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
struct reader_dev *dev = filp->private_data;
- int iobase = dev->link.io.BasePort1;
+ int iobase = dev->p_dev->io.BasePort1;
ssize_t rc;
int i;
unsigned int bytes_to_write;
@@ -351,7 +351,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
return -EAGAIN;
}
- if ((dev->link.state & DEV_PRESENT) == 0)
+ if (!pcmcia_dev_present(dev->p_dev))
return -ENODEV;
bytes_to_write = count;
@@ -445,14 +445,14 @@ static unsigned int cm4040_poll(struct file *filp, poll_table *wait)
static int cm4040_open(struct inode *inode, struct file *filp)
{
struct reader_dev *dev;
- dev_link_t *link;
+ struct pcmcia_device *link;
int minor = iminor(inode);
if (minor >= CM_MAX_DEV)
return -ENODEV;
link = dev_table[minor];
- if (link == NULL || !(DEV_OK(link)))
+ if (link == NULL || !pcmcia_dev_present(link))
return -ENODEV;
if (link->open)
@@ -478,7 +478,7 @@ static int cm4040_open(struct inode *inode, struct file *filp)
static int cm4040_close(struct inode *inode, struct file *filp)
{
struct reader_dev *dev = filp->private_data;
- dev_link_t *link;
+ struct pcmcia_device *link;
int minor = iminor(inode);
DEBUGP(2, dev, "-> cm4040_close(maj/min=%d.%d)\n", imajor(inode),
@@ -500,7 +500,7 @@ static int cm4040_close(struct inode *inode, struct file *filp)
return 0;
}
-static void cm4040_reader_release(dev_link_t *link)
+static void cm4040_reader_release(struct pcmcia_device *link)
{
struct reader_dev *dev = link->priv;
@@ -514,60 +514,49 @@ static void cm4040_reader_release(dev_link_t *link)
return;
}
-static void reader_config(dev_link_t *link, int devno)
+static int reader_config(struct pcmcia_device *link, int devno)
{
- client_handle_t handle;
struct reader_dev *dev;
tuple_t tuple;
cisparse_t parse;
- config_info_t conf;
u_char buf[64];
int fail_fn, fail_rc;
int rc;
- handle = link->handle;
-
tuple.DesiredTuple = CISTPL_CONFIG;
tuple.Attributes = 0;
tuple.TupleData = buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
- if ((fail_rc = pcmcia_get_first_tuple(handle, &tuple)) != CS_SUCCESS) {
+ if ((fail_rc = pcmcia_get_first_tuple(link, &tuple)) != CS_SUCCESS) {
fail_fn = GetFirstTuple;
goto cs_failed;
}
- if ((fail_rc = pcmcia_get_tuple_data(handle, &tuple)) != CS_SUCCESS) {
+ if ((fail_rc = pcmcia_get_tuple_data(link, &tuple)) != CS_SUCCESS) {
fail_fn = GetTupleData;
goto cs_failed;
}
- if ((fail_rc = pcmcia_parse_tuple(handle, &tuple, &parse))
+ if ((fail_rc = pcmcia_parse_tuple(link, &tuple, &parse))
!= CS_SUCCESS) {
fail_fn = ParseTuple;
goto cs_failed;
}
- if ((fail_rc = pcmcia_get_configuration_info(handle, &conf))
- != CS_SUCCESS) {
- fail_fn = GetConfigurationInfo;
- goto cs_failed;
- }
- link->state |= DEV_CONFIG;
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- link->conf.Vcc = conf.Vcc;
link->io.BasePort2 = 0;
link->io.NumPorts2 = 0;
link->io.Attributes2 = 0;
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- for (rc = pcmcia_get_first_tuple(handle, &tuple);
+ for (rc = pcmcia_get_first_tuple(link, &tuple);
rc == CS_SUCCESS;
- rc = pcmcia_get_next_tuple(handle, &tuple)) {
- rc = pcmcia_get_tuple_data(handle, &tuple);
+ rc = pcmcia_get_next_tuple(link, &tuple)) {
+ rc = pcmcia_get_tuple_data(link, &tuple);
if (rc != CS_SUCCESS)
continue;
- rc = pcmcia_parse_tuple(handle, &tuple, &parse);
+ rc = pcmcia_parse_tuple(link, &tuple, &parse);
if (rc != CS_SUCCESS)
continue;
@@ -585,13 +574,13 @@ static void reader_config(dev_link_t *link, int devno)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.IOAddrLines = parse.cftable_entry.io.flags
& CISTPL_IO_LINES_MASK;
- rc = pcmcia_request_io(handle, &link->io);
+ rc = pcmcia_request_io(link, &link->io);
- dev_printk(KERN_INFO, &handle_to_dev(handle), "foo");
+ dev_printk(KERN_INFO, &handle_to_dev(link), "foo");
if (rc == CS_SUCCESS)
break;
else
- dev_printk(KERN_INFO, &handle_to_dev(handle),
+ dev_printk(KERN_INFO, &handle_to_dev(link),
"pcmcia_request_io failed 0x%x\n", rc);
}
if (rc != CS_SUCCESS)
@@ -599,10 +588,10 @@ static void reader_config(dev_link_t *link, int devno)
link->conf.IntType = 00000002;
- if ((fail_rc = pcmcia_request_configuration(handle,&link->conf))
+ if ((fail_rc = pcmcia_request_configuration(link,&link->conf))
!=CS_SUCCESS) {
fail_fn = RequestConfiguration;
- dev_printk(KERN_INFO, &handle_to_dev(handle),
+ dev_printk(KERN_INFO, &handle_to_dev(link),
"pcmcia_request_configuration failed 0x%x\n",
fail_rc);
goto cs_release;
@@ -612,57 +601,31 @@ static void reader_config(dev_link_t *link, int devno)
sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno);
dev->node.major = major;
dev->node.minor = devno;
- dev->node.next = NULL;
- link->dev = &dev->node;
- link->state &= ~DEV_CONFIG_PENDING;
+ dev->node.next = &dev->node;
DEBUGP(2, dev, "device " DEVICE_NAME "%d at 0x%.4x-0x%.4x\n", devno,
link->io.BasePort1, link->io.BasePort1+link->io.NumPorts1);
DEBUGP(2, dev, "<- reader_config (succ)\n");
- return;
+ return 0;
cs_failed:
- cs_error(handle, fail_fn, fail_rc);
+ cs_error(link, fail_fn, fail_rc);
cs_release:
reader_release(link);
- link->state &= ~DEV_CONFIG_PENDING;
-}
-
-static int reader_suspend(struct pcmcia_device *p_dev)
-{
- dev_link_t *link = dev_to_instance(p_dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
+ return -ENODEV;
}
-static int reader_resume(struct pcmcia_device *p_dev)
-{
- dev_link_t *link = dev_to_instance(p_dev);
-
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_request_configuration(link->handle, &link->conf);
-
- return 0;
-}
-
-static void reader_release(dev_link_t *link)
+static void reader_release(struct pcmcia_device *link)
{
cm4040_reader_release(link->priv);
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
+ pcmcia_disable_device(link);
}
-static int reader_attach(struct pcmcia_device *p_dev)
+static int reader_probe(struct pcmcia_device *link)
{
struct reader_dev *dev;
- dev_link_t *link;
- int i;
+ int i, ret;
for (i = 0; i < CM_MAX_DEV; i++) {
if (dev_table[i] == NULL)
@@ -679,8 +642,8 @@ static int reader_attach(struct pcmcia_device *p_dev)
dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT;
dev->buffer_status = 0;
- link = &dev->link;
link->priv = dev;
+ dev->p_dev = link;
link->conf.IntType = INT_MEMORY_AND_IO;
dev_table[i] = link;
@@ -692,11 +655,9 @@ static int reader_attach(struct pcmcia_device *p_dev)
init_timer(&dev->poll_timer);
dev->poll_timer.function = &cm4040_do_poll;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- reader_config(link, i);
+ ret = reader_config(link, i);
+ if (ret)
+ return ret;
class_device_create(cmx_class, NULL, MKDEV(major, i), NULL,
"cmx%d", i);
@@ -704,9 +665,8 @@ static int reader_attach(struct pcmcia_device *p_dev)
return 0;
}
-static void reader_detach(struct pcmcia_device *p_dev)
+static void reader_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct reader_dev *dev = link->priv;
int devno;
@@ -718,10 +678,7 @@ static void reader_detach(struct pcmcia_device *p_dev)
if (devno == CM_MAX_DEV)
return;
- link->state &= ~DEV_PRESENT;
-
- if (link->state & DEV_CONFIG)
- reader_release(link);
+ reader_release(link);
dev_table[devno] = NULL;
kfree(dev);
@@ -753,10 +710,8 @@ static struct pcmcia_driver reader_driver = {
.drv = {
.name = "cm4040_cs",
},
- .probe = reader_attach,
+ .probe = reader_probe,
.remove = reader_detach,
- .suspend = reader_suspend,
- .resume = reader_resume,
.id_table = cm4040_ids,
};
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index e6b714b6390df..07213454c458c 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -228,7 +228,7 @@ typedef struct _mgslpc_info {
struct _input_signal_events input_signal_events;
/* PCMCIA support */
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
int stop;
@@ -484,7 +484,7 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout);
/* PCMCIA prototypes */
-static void mgslpc_config(dev_link_t *link);
+static int mgslpc_config(struct pcmcia_device *link);
static void mgslpc_release(u_long arg);
static void mgslpc_detach(struct pcmcia_device *p_dev);
@@ -533,14 +533,14 @@ static void ldisc_receive_buf(struct tty_struct *tty,
}
}
-static int mgslpc_attach(struct pcmcia_device *p_dev)
+static int mgslpc_probe(struct pcmcia_device *link)
{
MGSLPC_INFO *info;
- dev_link_t *link;
-
+ int ret;
+
if (debug_level >= DEBUG_LEVEL_INFO)
printk("mgslpc_attach\n");
-
+
info = (MGSLPC_INFO *)kmalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
if (!info) {
printk("Error can't allocate device instance data\n");
@@ -565,25 +565,22 @@ static int mgslpc_attach(struct pcmcia_device *p_dev)
info->imrb_value = 0xffff;
info->pim_value = 0xff;
- link = &info->link;
+ info->p_dev = link;
link->priv = info;
-
- /* Initialize the dev_link_t structure */
+
+ /* Initialize the struct pcmcia_device structure */
/* Interrupt setup */
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->irq.Handler = NULL;
-
+
link->conf.Attributes = 0;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- mgslpc_config(link);
+ ret = mgslpc_config(link);
+ if (ret)
+ return ret;
mgslpc_add_device(info);
@@ -596,15 +593,13 @@ static int mgslpc_attach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void mgslpc_config(dev_link_t *link)
+static int mgslpc_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
MGSLPC_INFO *info = link->priv;
tuple_t tuple;
cisparse_t parse;
int last_fn, last_ret;
u_char buf[64];
- config_info_t conf;
cistpl_cftable_entry_t dflt = { 0 };
cistpl_cftable_entry_t *cfg;
@@ -617,27 +612,20 @@ static void mgslpc_config(dev_link_t *link)
tuple.TupleData = buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
-
- /* Configure card */
- link->state |= DEV_CONFIG;
-
- /* Look up the current Vcc */
- CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
- link->conf.Vcc = conf.Vcc;
/* get CIS configuration entry */
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
cfg = &(parse.cftable_entry);
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
if (cfg->index == 0)
@@ -658,11 +646,10 @@ static void mgslpc_config(dev_link_t *link)
link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
link->io.BasePort1 = io->win[0].base;
link->io.NumPorts1 = io->win[0].len;
- CS_CHECK(RequestIO, pcmcia_request_io(link->handle, &link->io));
+ CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
}
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 8;
link->conf.Present = PRESENT_OPTION;
@@ -670,9 +657,9 @@ static void mgslpc_config(dev_link_t *link)
link->irq.Attributes |= IRQ_HANDLE_PRESENT;
link->irq.Handler = mgslpc_isr;
link->irq.Instance = info;
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
info->io_base = link->io.BasePort1;
info->irq_level = link->irq.AssignedIRQ;
@@ -680,7 +667,7 @@ static void mgslpc_config(dev_link_t *link)
/* add to linked list of devices */
sprintf(info->node.dev_name, "mgslpc0");
info->node.major = info->node.minor = 0;
- link->dev = &info->node;
+ link->dev_node = &info->node;
printk(KERN_INFO "%s: index 0x%02x:",
info->node.dev_name, link->conf.ConfigIndex);
@@ -690,13 +677,12 @@ static void mgslpc_config(dev_link_t *link)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
printk("\n");
-
- link->state &= ~DEV_CONFIG_PENDING;
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
mgslpc_release((u_long)link);
+ return -ENODEV;
}
/* Card has been removed.
@@ -705,58 +691,38 @@ cs_failed:
*/
static void mgslpc_release(u_long arg)
{
- dev_link_t *link = (dev_link_t *)arg;
+ struct pcmcia_device *link = (struct pcmcia_device *)arg;
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("mgslpc_release(0x%p)\n", link);
-
- /* Unlink the device chain */
- link->dev = NULL;
- link->state &= ~DEV_CONFIG;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("mgslpc_release(0x%p)\n", link);
- pcmcia_release_configuration(link->handle);
- if (link->io.NumPorts1)
- pcmcia_release_io(link->handle, &link->io);
- if (link->irq.AssignedIRQ)
- pcmcia_release_irq(link->handle, &link->irq);
+ pcmcia_disable_device(link);
}
-static void mgslpc_detach(struct pcmcia_device *p_dev)
+static void mgslpc_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("mgslpc_detach(0x%p)\n", link);
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("mgslpc_detach(0x%p)\n", link);
- if (link->state & DEV_CONFIG) {
- ((MGSLPC_INFO *)link->priv)->stop = 1;
- mgslpc_release((u_long)link);
- }
+ ((MGSLPC_INFO *)link->priv)->stop = 1;
+ mgslpc_release((u_long)link);
- mgslpc_remove_device((MGSLPC_INFO *)link->priv);
+ mgslpc_remove_device((MGSLPC_INFO *)link->priv);
}
-static int mgslpc_suspend(struct pcmcia_device *dev)
+static int mgslpc_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
MGSLPC_INFO *info = link->priv;
- link->state |= DEV_SUSPEND;
info->stop = 1;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
return 0;
}
-static int mgslpc_resume(struct pcmcia_device *dev)
+static int mgslpc_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
MGSLPC_INFO *info = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_request_configuration(link->handle, &link->conf);
info->stop = 0;
return 0;
@@ -1280,7 +1246,7 @@ static irqreturn_t mgslpc_isr(int irq, void *dev_id, struct pt_regs * regs)
if (!info)
return IRQ_NONE;
- if (!(info->link.state & DEV_CONFIG))
+ if (!(info->p_dev->_locked))
return IRQ_HANDLED;
spin_lock(&info->lock);
@@ -3033,7 +2999,7 @@ static struct pcmcia_driver mgslpc_driver = {
.drv = {
.name = "synclink_cs",
},
- .probe = mgslpc_attach,
+ .probe = mgslpc_probe,
.remove = mgslpc_detach,
.id_table = mgslpc_ids,
.suspend = mgslpc_suspend,
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 3f5d6077f39c3..a9c5a7230f895 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -504,7 +504,6 @@ static int stl_echmcaintr(stlbrd_t *brdp);
static int stl_echpciintr(stlbrd_t *brdp);
static int stl_echpci64intr(stlbrd_t *brdp);
static void stl_offintr(void *private);
-static void *stl_memalloc(int len);
static stlbrd_t *stl_allocbrd(void);
static stlport_t *stl_getport(int brdnr, int panelnr, int portnr);
@@ -940,17 +939,6 @@ static int stl_parsebrd(stlconf_t *confp, char **argp)
/*****************************************************************************/
/*
- * Local driver kernel memory allocation routine.
- */
-
-static void *stl_memalloc(int len)
-{
- return (void *) kmalloc(len, GFP_KERNEL);
-}
-
-/*****************************************************************************/
-
-/*
* Allocate a new board structure. Fill out the basic info in it.
*/
@@ -958,14 +946,13 @@ static stlbrd_t *stl_allocbrd(void)
{
stlbrd_t *brdp;
- brdp = (stlbrd_t *) stl_memalloc(sizeof(stlbrd_t));
- if (brdp == (stlbrd_t *) NULL) {
+ brdp = kzalloc(sizeof(stlbrd_t), GFP_KERNEL);
+ if (!brdp) {
printk("STALLION: failed to allocate memory (size=%d)\n",
sizeof(stlbrd_t));
- return (stlbrd_t *) NULL;
+ return NULL;
}
- memset(brdp, 0, sizeof(stlbrd_t));
brdp->magic = STL_BOARDMAGIC;
return brdp;
}
@@ -1017,9 +1004,9 @@ static int stl_open(struct tty_struct *tty, struct file *filp)
portp->refcount++;
if ((portp->flags & ASYNC_INITIALIZED) == 0) {
- if (portp->tx.buf == (char *) NULL) {
- portp->tx.buf = (char *) stl_memalloc(STL_TXBUFSIZE);
- if (portp->tx.buf == (char *) NULL)
+ if (!portp->tx.buf) {
+ portp->tx.buf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL);
+ if (!portp->tx.buf)
return -ENOMEM;
portp->tx.head = portp->tx.buf;
portp->tx.tail = portp->tx.buf;
@@ -2178,13 +2165,12 @@ static int __init stl_initports(stlbrd_t *brdp, stlpanel_t *panelp)
* each ports data structures.
*/
for (i = 0; (i < panelp->nrports); i++) {
- portp = (stlport_t *) stl_memalloc(sizeof(stlport_t));
- if (portp == (stlport_t *) NULL) {
+ portp = kzalloc(sizeof(stlport_t), GFP_KERNEL);
+ if (!portp) {
printk("STALLION: failed to allocate memory "
"(size=%d)\n", sizeof(stlport_t));
break;
}
- memset(portp, 0, sizeof(stlport_t));
portp->magic = STL_PORTMAGIC;
portp->portnr = i;
@@ -2315,13 +2301,12 @@ static inline int stl_initeio(stlbrd_t *brdp)
* can complete the setup.
*/
- panelp = (stlpanel_t *) stl_memalloc(sizeof(stlpanel_t));
- if (panelp == (stlpanel_t *) NULL) {
+ panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL);
+ if (!panelp) {
printk(KERN_WARNING "STALLION: failed to allocate memory "
"(size=%d)\n", sizeof(stlpanel_t));
- return(-ENOMEM);
+ return -ENOMEM;
}
- memset(panelp, 0, sizeof(stlpanel_t));
panelp->magic = STL_PANELMAGIC;
panelp->brdnr = brdp->brdnr;
@@ -2490,13 +2475,12 @@ static inline int stl_initech(stlbrd_t *brdp)
status = inb(ioaddr + ECH_PNLSTATUS);
if ((status & ECH_PNLIDMASK) != nxtid)
break;
- panelp = (stlpanel_t *) stl_memalloc(sizeof(stlpanel_t));
- if (panelp == (stlpanel_t *) NULL) {
+ panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL);
+ if (!panelp) {
printk("STALLION: failed to allocate memory "
"(size=%d)\n", sizeof(stlpanel_t));
break;
}
- memset(panelp, 0, sizeof(stlpanel_t));
panelp->magic = STL_PANELMAGIC;
panelp->brdnr = brdp->brdnr;
panelp->panelnr = panelnr;
@@ -3074,8 +3058,8 @@ static int __init stl_init(void)
/*
* Allocate a temporary write buffer.
*/
- stl_tmpwritebuf = (char *) stl_memalloc(STL_TXBUFSIZE);
- if (stl_tmpwritebuf == (char *) NULL)
+ stl_tmpwritebuf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL);
+ if (!stl_tmpwritebuf)
printk("STALLION: failed to allocate memory (size=%d)\n",
STL_TXBUFSIZE);
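
The stallion change above replaces each stl_memalloc()-plus-memset() pair with a single kzalloc(), which returns already-zeroed memory, and drops the redundant casts and "(type *) NULL" comparisons along the way. A minimal userspace sketch of the same shape, with calloc() standing in for kzalloc() (illustrative only, not part of the patch):

/* zalloc_demo.c - allocate-and-zero in one call, the userspace
 * analogue of replacing kmalloc()+memset() with kzalloc(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct board { int magic; char name[16]; };

int main(void)
{
	/* old shape: allocate, check, then memset to zero */
	struct board *a = malloc(sizeof(*a));
	if (!a)
		return 1;
	memset(a, 0, sizeof(*a));

	/* new shape: one call that already returns zeroed memory */
	struct board *b = calloc(1, sizeof(*b));
	if (!b)
		return 1;

	printf("a->magic=%d b->magic=%d\n", a->magic, b->magic);
	free(a);
	free(b);
	return 0;
}
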
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 0bfd1b63662ea..98b126c2ded86 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -376,7 +376,7 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, s
return copied;
}
-EXPORT_SYMBOL_GPL(tty_insert_flip_string);
+EXPORT_SYMBOL(tty_insert_flip_string);
int tty_insert_flip_string_flags(struct tty_struct *tty, const unsigned char *chars, const char *flags, size_t size)
{
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index ca4844c527dae..acc5d47844eb4 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2328,6 +2328,10 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
case TIOCL_SETVESABLANK:
set_vesa_blanking(p);
break;
+ case TIOCL_GETKMSGREDIRECT:
+ data = kmsg_redirect;
+ ret = __put_user(data, p);
+ break;
case TIOCL_SETKMSGREDIRECT:
if (!capable(CAP_SYS_ADMIN)) {
ret = -EPERM;
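
The new TIOCL_GETKMSGREDIRECT case mirrors TIOCL_SETKMSGREDIRECT: userspace passes the subcommand in the first byte of the TIOCLINUX argument, and the kernel writes the current kmsg redirect console back into that same byte. A minimal userspace sketch (not part of the patch; it assumes TIOCL_GETKMSGREDIRECT is available from <linux/tiocl.h> and that the process may open a virtual console such as /dev/tty0):

/* query_kmsg_redirect.c - read the current kmsg redirect console
 * via the TIOCLINUX ioctl. Assumes a kernel with this patch and
 * permission to open a VT (e.g. /dev/tty0). */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tiocl.h>	/* TIOCL_GETKMSGREDIRECT */

int main(void)
{
	char arg = TIOCL_GETKMSGREDIRECT;	/* subcode goes in byte 0 */
	int fd = open("/dev/tty0", O_RDONLY);

	if (fd < 0 || ioctl(fd, TIOCLINUX, &arg) < 0) {
		perror("TIOCLINUX");
		return 1;
	}
	/* the kernel overwrote byte 0 with kmsg_redirect (0 = none) */
	printf("kmsg redirect console: %d\n", arg);
	close(fd);
	return 0;
}
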
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index 16e99db2e12d6..d53f664a4dd89 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -60,6 +60,13 @@ config SOFT_WATCHDOG
# ARM Architecture
+config AT91_WATCHDOG
+ tristate "AT91RM9200 watchdog"
+ depends on WATCHDOG && ARCH_AT91RM9200
+ help
+ Watchdog timer embedded into AT91RM9200 chips. This will reboot your
+ system when the timeout is reached.
+
config 21285_WATCHDOG
tristate "DC21285 watchdog"
depends on WATCHDOG && FOOTBRIDGE
diff --git a/drivers/char/watchdog/Makefile b/drivers/char/watchdog/Makefile
index d6f27fde99056..6ab77b61a6434 100644
--- a/drivers/char/watchdog/Makefile
+++ b/drivers/char/watchdog/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_WDTPCI) += wdt_pci.o
obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o
# ARM Architecture
+obj-$(CONFIG_AT91_WATCHDOG) += at91_wdt.o
obj-$(CONFIG_21285_WATCHDOG) += wdt285.o
obj-$(CONFIG_977_WATCHDOG) += wdt977.o
obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o
diff --git a/drivers/char/watchdog/at91_wdt.c b/drivers/char/watchdog/at91_wdt.c
new file mode 100644
index 0000000000000..ac83bc4b019ad
--- /dev/null
+++ b/drivers/char/watchdog/at91_wdt.c
@@ -0,0 +1,228 @@
+/*
+ * Watchdog driver for Atmel AT91RM9200 (Thunder)
+ *
+ * Copyright (C) 2003 SAN People (Pty) Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <asm/bitops.h>
+#include <asm/uaccess.h>
+
+
+#define WDT_DEFAULT_TIME 5 /* 5 seconds */
+#define WDT_MAX_TIME 256 /* 256 seconds */
+
+static int wdt_time = WDT_DEFAULT_TIME;
+static int nowayout = WATCHDOG_NOWAYOUT;
+
+module_param(wdt_time, int, 0);
+MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="__MODULE_STRING(WDT_DEFAULT_TIME) ")");
+
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+
+static unsigned long at91wdt_busy;
+
+/* ......................................................................... */
+
+/*
+ * Disable the watchdog.
+ */
+static void inline at91_wdt_stop(void)
+{
+ at91_sys_write(AT91_ST_WDMR, AT91_ST_EXTEN);
+}
+
+/*
+ * Enable and reset the watchdog.
+ */
+static void inline at91_wdt_start(void)
+{
+ at91_sys_write(AT91_ST_WDMR, AT91_ST_EXTEN | AT91_ST_RSTEN | (((65536 * wdt_time) >> 8) & AT91_ST_WDV));
+ at91_sys_write(AT91_ST_CR, AT91_ST_WDRST);
+}
+
+/*
+ * Reload the watchdog timer. (ie, pat the watchdog)
+ */
+static void inline at91_wdt_reload(void)
+{
+ at91_sys_write(AT91_ST_CR, AT91_ST_WDRST);
+}
+
+/* ......................................................................... */
+
+/*
+ * Watchdog device is opened, and watchdog starts running.
+ */
+static int at91_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(0, &at91wdt_busy))
+ return -EBUSY;
+
+ at91_wdt_start();
+ return nonseekable_open(inode, file);
+}
+
+/*
+ * Close the watchdog device.
+ * If CONFIG_WATCHDOG_NOWAYOUT is NOT defined then the watchdog is also
+ * disabled.
+ */
+static int at91_wdt_close(struct inode *inode, struct file *file)
+{
+ if (!nowayout)
+ at91_wdt_stop(); /* Disable the watchdog when file is closed */
+
+ clear_bit(0, &at91wdt_busy);
+ return 0;
+}
+
+/*
+ * Change the watchdog time interval.
+ */
+static int at91_wdt_settimeout(int new_time)
+{
+ /*
+ * All counting occurs at SLOW_CLOCK / 128 = 256 Hz (the 32.768 kHz
+ * slow clock divided by 128).
+ *
+ * Since WDV is a 16-bit counter, the maximum period is
+ * 65536 / 256 = 256 seconds.
+ */
+ if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
+ return -EINVAL;
+
+ /* Set new watchdog time. It will be used when at91_wdt_start() is called. */
+ wdt_time = new_time;
+ return 0;
+}
+
+static struct watchdog_info at91_wdt_info = {
+ .identity = "at91 watchdog",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+};
+
+/*
+ * Handle commands from user-space.
+ */
+static int at91_wdt_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int new_value;
+
+ switch(cmd) {
+ case WDIOC_KEEPALIVE:
+ at91_wdt_reload(); /* pat the watchdog */
+ return 0;
+
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &at91_wdt_info, sizeof(at91_wdt_info)) ? -EFAULT : 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_value, p))
+ return -EFAULT;
+
+ if (at91_wdt_settimeout(new_value))
+ return -EINVAL;
+
+ /* Enable new time value */
+ at91_wdt_start();
+
+ /* Return current value */
+ return put_user(wdt_time, p);
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(wdt_time, p);
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(new_value, p))
+ return -EFAULT;
+
+ if (new_value & WDIOS_DISABLECARD)
+ at91_wdt_stop();
+ if (new_value & WDIOS_ENABLECARD)
+ at91_wdt_start();
+ return 0;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+/*
+ * Pat the watchdog whenever device is written to.
+ */
+static ssize_t at91_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
+{
+ at91_wdt_reload(); /* pat the watchdog */
+ return len;
+}
+
+/* ......................................................................... */
+
+static struct file_operations at91wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .ioctl = at91_wdt_ioctl,
+ .open = at91_wdt_open,
+ .release = at91_wdt_close,
+ .write = at91_wdt_write,
+};
+
+static struct miscdevice at91wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &at91wdt_fops,
+};
+
+static int __init at91_wdt_init(void)
+{
+ int res;
+
+ /* Check that the heartbeat value is within range; if not reset to the default */
+ if (at91_wdt_settimeout(wdt_time)) {
+ at91_wdt_settimeout(WDT_DEFAULT_TIME);
+ printk(KERN_INFO "at91_wdt: wdt_time value must be 1 <= wdt_time <= 256, using %d\n", wdt_time);
+ }
+
+ res = misc_register(&at91wdt_miscdev);
+ if (res)
+ return res;
+
+ printk("AT91 Watchdog Timer enabled (%d seconds, nowayout=%d)\n", wdt_time, nowayout);
+ return 0;
+}
+
+static void __exit at91_wdt_exit(void)
+{
+ misc_deregister(&at91wdt_miscdev);
+}
+
+module_init(at91_wdt_init);
+module_exit(at91_wdt_exit);
+
+MODULE_AUTHOR("Andrew Victor");
+MODULE_DESCRIPTION("Watchdog driver for Atmel AT91RM9200");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
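
Since at91_wdt registers the standard /dev/watchdog misc device, it can be exercised with the generic ioctls from <linux/watchdog.h>; internally a timeout of N seconds loads WDV with (65536 * N) >> 8 = 256 * N counts, which at 256 Hz expires after N seconds. A minimal userspace sketch (not part of the patch; device path and timeout value are illustrative):

/* wdt_ping.c - set a timeout on /dev/watchdog and keep patting it.
 * Uses only the generic watchdog ioctls; nothing AT91-specific. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;		/* seconds, must be 1..256 for at91_wdt */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("/dev/watchdog");
		return 1;
	}
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
		printf("timeout set to %d seconds\n", timeout);

	for (int i = 0; i < 10; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* pat the watchdog */
		sleep(timeout / 2);
	}
	close(fd);	/* stops the watchdog unless the module was loaded with nowayout */
	return 0;
}
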
diff --git a/drivers/char/watchdog/pcwd.c b/drivers/char/watchdog/pcwd.c
index 8d6b249ad66b8..6d44ca68312df 100644
--- a/drivers/char/watchdog/pcwd.c
+++ b/drivers/char/watchdog/pcwd.c
@@ -66,15 +66,13 @@
#include <linux/fs.h> /* For file operations */
#include <linux/ioport.h> /* For io-port access */
#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
-#include <linux/sched.h> /* TASK_INTERRUPTIBLE, set_current_state() and friends */
-#include <linux/slab.h> /* For kmalloc */
#include <asm/uaccess.h> /* For copy_to_user/put_user/... */
#include <asm/io.h> /* For inb/outb/... */
/* Module and version information */
-#define WATCHDOG_VERSION "1.16"
-#define WATCHDOG_DATE "03 Jan 2006"
+#define WATCHDOG_VERSION "1.17"
+#define WATCHDOG_DATE "12 Feb 2006"
#define WATCHDOG_DRIVER_NAME "ISA-PC Watchdog"
#define WATCHDOG_NAME "pcwd"
#define PFX WATCHDOG_NAME ": "
@@ -96,15 +94,19 @@
* PCI-PC Watchdog card.
*/
/* Port 1 : Control Status #1 for the PC Watchdog card, revision A. */
-#define WD_WDRST 0x01 /* Previously reset state */
-#define WD_T110 0x02 /* Temperature overheat sense */
-#define WD_HRTBT 0x04 /* Heartbeat sense */
-#define WD_RLY2 0x08 /* External relay triggered */
-#define WD_SRLY2 0x80 /* Software external relay triggered */
+#define WD_WDRST 0x01 /* Previously reset state */
+#define WD_T110 0x02 /* Temperature overheat sense */
+#define WD_HRTBT 0x04 /* Heartbeat sense */
+#define WD_RLY2 0x08 /* External relay triggered */
+#define WD_SRLY2 0x80 /* Software external relay triggered */
/* Port 1 : Control Status #1 for the PC Watchdog card, revision C. */
-#define WD_REVC_WTRP 0x01 /* Watchdog Trip status */
-#define WD_REVC_HRBT 0x02 /* Watchdog Heartbeat */
-#define WD_REVC_TTRP 0x04 /* Temperature Trip status */
+#define WD_REVC_WTRP 0x01 /* Watchdog Trip status */
+#define WD_REVC_HRBT 0x02 /* Watchdog Heartbeat */
+#define WD_REVC_TTRP 0x04 /* Temperature Trip status */
+#define WD_REVC_RL2A 0x08 /* Relay 2 activated by on-board processor */
+#define WD_REVC_RL1A 0x10 /* Relay 1 active */
+#define WD_REVC_R2DS 0x40 /* Relay 2 disable */
+#define WD_REVC_RLY2 0x80 /* Relay 2 activated? */
/* Port 2 : Control Status #2 */
#define WD_WDIS 0x10 /* Watchdog Disabled */
#define WD_ENTP 0x20 /* Watchdog Enable Temperature Trip */
@@ -122,9 +124,14 @@
#define CMD_ISA_VERSION_HUNDRETH 0x03
#define CMD_ISA_VERSION_MINOR 0x04
#define CMD_ISA_SWITCH_SETTINGS 0x05
+#define CMD_ISA_RESET_PC 0x06
+#define CMD_ISA_ARM_0 0x07
+#define CMD_ISA_ARM_30 0x08
+#define CMD_ISA_ARM_60 0x09
#define CMD_ISA_DELAY_TIME_2SECS 0x0A
#define CMD_ISA_DELAY_TIME_4SECS 0x0B
#define CMD_ISA_DELAY_TIME_8SECS 0x0C
+#define CMD_ISA_RESET_RELAYS 0x0D
/*
* We are using an kernel timer to do the pinging of the watchdog
@@ -142,6 +149,7 @@ static atomic_t open_allowed = ATOMIC_INIT(1);
static char expect_close;
static int temp_panic;
static struct { /* this is private data for each ISA-PC watchdog card */
+ char fw_ver_str[6]; /* The cards firmware version */
int revision; /* The card's revision */
int supports_temp; /* Wether or not the card has a temperature device */
int command_mode; /* Wether or not the card is in command mode */
@@ -153,6 +161,13 @@ static struct { /* this is private data for each ISA-PC watchdog card */
} pcwd_private;
/* module parameters */
+#define QUIET 0 /* Default */
+#define VERBOSE 1 /* Verbose */
+#define DEBUG 2 /* print fancy stuff too */
+static int debug = QUIET;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level: 0=Quiet, 1=Verbose, 2=Debug (default=0)");
+
#define WATCHDOG_HEARTBEAT 60 /* 60 sec default heartbeat */
static int heartbeat = WATCHDOG_HEARTBEAT;
module_param(heartbeat, int, 0);
@@ -172,6 +187,10 @@ static int send_isa_command(int cmd)
int control_status;
int port0, last_port0; /* Double read for stabilising */
+ if (debug >= DEBUG)
+ printk(KERN_DEBUG PFX "sending following data cmd=0x%02x\n",
+ cmd);
+
/* The WCMD bit must be 1 and the command is only 4 bits in size */
control_status = (cmd & 0x0F) | WD_WCMD;
outb_p(control_status, pcwd_private.io_addr + 2);
@@ -188,6 +207,10 @@ static int send_isa_command(int cmd)
udelay (250);
}
+ if (debug >= DEBUG)
+ printk(KERN_DEBUG PFX "received following data for cmd=0x%02x: port0=0x%02x last_port0=0x%02x\n",
+ cmd, port0, last_port0);
+
return port0;
}
@@ -214,6 +237,10 @@ static int set_command_mode(void)
spin_unlock(&pcwd_private.io_lock);
pcwd_private.command_mode = found;
+ if (debug >= DEBUG)
+ printk(KERN_DEBUG PFX "command_mode=%d\n",
+ pcwd_private.command_mode);
+
return(found);
}
@@ -226,6 +253,10 @@ static void unset_command_mode(void)
spin_unlock(&pcwd_private.io_lock);
pcwd_private.command_mode = 0;
+
+ if (debug >= DEBUG)
+ printk(KERN_DEBUG PFX "command_mode=%d\n",
+ pcwd_private.command_mode);
}
static inline void pcwd_check_temperature_support(void)
@@ -234,27 +265,22 @@ static inline void pcwd_check_temperature_support(void)
pcwd_private.supports_temp = 1;
}
-static inline char *get_firmware(void)
+static inline void pcwd_get_firmware(void)
{
int one, ten, hund, minor;
- char *ret;
- ret = kmalloc(6, GFP_KERNEL);
- if(ret == NULL)
- return NULL;
+ strcpy(pcwd_private.fw_ver_str, "ERROR");
if (set_command_mode()) {
one = send_isa_command(CMD_ISA_VERSION_INTEGER);
ten = send_isa_command(CMD_ISA_VERSION_TENTH);
hund = send_isa_command(CMD_ISA_VERSION_HUNDRETH);
minor = send_isa_command(CMD_ISA_VERSION_MINOR);
- sprintf(ret, "%c.%c%c%c", one, ten, hund, minor);
+ sprintf(pcwd_private.fw_ver_str, "%c.%c%c%c", one, ten, hund, minor);
}
- else
- sprintf(ret, "ERROR");
-
unset_command_mode();
- return(ret);
+
+ return;
}
static inline int pcwd_get_option_switches(void)
@@ -272,17 +298,15 @@ static inline int pcwd_get_option_switches(void)
static void pcwd_show_card_info(void)
{
- char *firmware;
int option_switches;
/* Get some extra info from the hardware (in command/debug/diag mode) */
if (pcwd_private.revision == PCWD_REVISION_A)
printk(KERN_INFO PFX "ISA-PC Watchdog (REV.A) detected at port 0x%04x\n", pcwd_private.io_addr);
else if (pcwd_private.revision == PCWD_REVISION_C) {
- firmware = get_firmware();
+ pcwd_get_firmware();
printk(KERN_INFO PFX "ISA-PC Watchdog (REV.C) detected at port 0x%04x (Firmware version: %s)\n",
- pcwd_private.io_addr, firmware);
- kfree(firmware);
+ pcwd_private.io_addr, pcwd_private.fw_ver_str);
option_switches = pcwd_get_option_switches();
printk(KERN_INFO PFX "Option switches (0x%02x): Temperature Reset Enable=%s, Power On Delay=%s\n",
option_switches,
@@ -362,6 +386,10 @@ static int pcwd_start(void)
return -EIO;
}
}
+
+ if (debug >= VERBOSE)
+ printk(KERN_DEBUG PFX "Watchdog started\n");
+
return 0;
}
@@ -386,6 +414,10 @@ static int pcwd_stop(void)
return -EIO;
}
}
+
+ if (debug >= VERBOSE)
+ printk(KERN_DEBUG PFX "Watchdog stopped\n");
+
return 0;
}
@@ -393,6 +425,10 @@ static int pcwd_keepalive(void)
{
/* user land ping */
pcwd_private.next_heartbeat = jiffies + (heartbeat * HZ);
+
+ if (debug >= DEBUG)
+ printk(KERN_DEBUG PFX "Watchdog keepalive signal send\n");
+
return 0;
}
@@ -402,12 +438,17 @@ static int pcwd_set_heartbeat(int t)
return -EINVAL;
heartbeat = t;
+
+ if (debug >= VERBOSE)
+ printk(KERN_DEBUG PFX "New heartbeat: %d\n",
+ heartbeat);
+
return 0;
}
static int pcwd_get_status(int *status)
{
- int card_status;
+ int control_status;
*status=0;
spin_lock(&pcwd_private.io_lock);
@@ -415,37 +456,39 @@ static int pcwd_get_status(int *status)
/* Rev A cards return status information from
* the base register, which is used for the
* temperature in other cards. */
- card_status = inb(pcwd_private.io_addr);
+ control_status = inb(pcwd_private.io_addr);
else {
/* Rev C cards return card status in the base
* address + 1 register. And use different bits
* to indicate a card initiated reset, and an
* over-temperature condition. And the reboot
* status can be reset. */
- card_status = inb(pcwd_private.io_addr + 1);
+ control_status = inb(pcwd_private.io_addr + 1);
}
spin_unlock(&pcwd_private.io_lock);
if (pcwd_private.revision == PCWD_REVISION_A) {
- if (card_status & WD_WDRST)
+ if (control_status & WD_WDRST)
*status |= WDIOF_CARDRESET;
- if (card_status & WD_T110) {
+ if (control_status & WD_T110) {
*status |= WDIOF_OVERHEAT;
if (temp_panic) {
printk (KERN_INFO PFX "Temperature overheat trip!\n");
kernel_power_off();
+ /* or should we just do a: panic(PFX "Temperature overheat trip!\n"); */
}
}
} else {
- if (card_status & WD_REVC_WTRP)
+ if (control_status & WD_REVC_WTRP)
*status |= WDIOF_CARDRESET;
- if (card_status & WD_REVC_TTRP) {
+ if (control_status & WD_REVC_TTRP) {
*status |= WDIOF_OVERHEAT;
if (temp_panic) {
printk (KERN_INFO PFX "Temperature overheat trip!\n");
kernel_power_off();
+ /* or should we just do a: panic(PFX "Temperature overheat trip!\n"); */
}
}
}
@@ -455,9 +498,25 @@ static int pcwd_get_status(int *status)
static int pcwd_clear_status(void)
{
+ int control_status;
+
if (pcwd_private.revision == PCWD_REVISION_C) {
spin_lock(&pcwd_private.io_lock);
- outb_p(0x00, pcwd_private.io_addr + 1); /* clear reset status */
+
+ if (debug >= VERBOSE)
+ printk(KERN_INFO PFX "clearing watchdog trip status\n");
+
+ control_status = inb_p(pcwd_private.io_addr + 1);
+
+ if (debug >= DEBUG) {
+ printk(KERN_DEBUG PFX "status was: 0x%02x\n", control_status);
+ printk(KERN_DEBUG PFX "sending: 0x%02x\n",
+ (control_status & WD_REVC_R2DS));
+ }
+
+ /* clear reset status & Keep Relay 2 disable state as it is */
+ outb_p((control_status & WD_REVC_R2DS), pcwd_private.io_addr + 1);
+
spin_unlock(&pcwd_private.io_lock);
}
return 0;
@@ -481,6 +540,11 @@ static int pcwd_get_temperature(int *temperature)
*temperature = ((inb(pcwd_private.io_addr)) * 9 / 5) + 32;
spin_unlock(&pcwd_private.io_lock);
+ if (debug >= DEBUG) {
+ printk(KERN_DEBUG PFX "temperature is: %d F\n",
+ *temperature);
+ }
+
return 0;
}
@@ -599,6 +663,8 @@ static ssize_t pcwd_write(struct file *file, const char __user *buf, size_t len,
static int pcwd_open(struct inode *inode, struct file *file)
{
if (!atomic_dec_and_test(&open_allowed) ) {
+ if (debug >= VERBOSE)
+ printk(KERN_ERR PFX "Attempt to open already opened device.\n");
atomic_inc( &open_allowed );
return -EBUSY;
}
@@ -922,7 +988,8 @@ static void __exit pcwd_cleanup_module(void)
{
if (pcwd_private.io_addr)
pcwatchdog_exit();
- return;
+
+ printk(KERN_INFO PFX "Watchdog Module Unloaded.\n");
}
module_init(pcwd_init_module);
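
The firmware-version handling above trades the kmalloc'ed 6-byte buffer for a fixed fw_ver_str[6] in pcwd_private. Six bytes is exactly enough for both possible contents: the "%c.%c%c%c" version format (five characters plus the terminating NUL) and the "ERROR" fallback. A quick standalone check, using made-up version bytes (illustrative only, not part of the patch):

/* fwstr_check.c - show that the "%c.%c%c%c" firmware format and the
 * "ERROR" fallback both fit a six-byte buffer. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char fw_ver_str[6];
	int one = '1', ten = '2', hund = '3', minor = '4';	/* made-up version bytes */

	sprintf(fw_ver_str, "%c.%c%c%c", one, ten, hund, minor);
	printf("\"%s\" uses %zu of %zu bytes\n",
	       fw_ver_str, strlen(fw_ver_str) + 1, sizeof(fw_ver_str));

	strcpy(fw_ver_str, "ERROR");
	printf("\"%s\" uses %zu of %zu bytes\n",
	       fw_ver_str, strlen(fw_ver_str) + 1, sizeof(fw_ver_str));
	return 0;
}
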
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index 2700c5c45b8ab..3fdfda9324fae 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -705,7 +705,8 @@ err_out_misc_deregister:
err_out_unregister_reboot:
unregister_reboot_notifier(&usb_pcwd_notifier);
error:
- usb_pcwd_delete (usb_pcwd);
+ if (usb_pcwd)
+ usb_pcwd_delete(usb_pcwd);
usb_pcwd_device = NULL;
return retval;
}
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index b582d0cdc24f3..4f0898400c6de 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -71,7 +71,7 @@ config EDAC_E7XXX
config EDAC_E752X
tristate "Intel e752x (e7520, e7525, e7320)"
- depends on EDAC_MM_EDAC && PCI && X86
+ depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG
help
Support for error detection and correction on the Intel
E7520, E7525, E7320 server chipsets.
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index 7636c1a58f9c7..23a9e1ea8e321 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -33,7 +33,6 @@
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/dmi.h>
-#include <linux/mutex.h>
#include <asm/io.h>
#define HDAPS_LOW_PORT 0x1600 /* first port used by hdaps */
@@ -71,10 +70,10 @@ static u8 km_activity;
static int rest_x;
static int rest_y;
-static DEFINE_MUTEX(hdaps_mutex);
+static DECLARE_MUTEX(hdaps_sem);
/*
- * __get_latch - Get the value from a given port. Callers must hold hdaps_mutex.
+ * __get_latch - Get the value from a given port. Callers must hold hdaps_sem.
*/
static inline u8 __get_latch(u16 port)
{
@@ -83,7 +82,7 @@ static inline u8 __get_latch(u16 port)
/*
* __check_latch - Check a port latch for a given value. Returns zero if the
- * port contains the given value. Callers must hold hdaps_mutex.
+ * port contains the given value. Callers must hold hdaps_sem.
*/
static inline int __check_latch(u16 port, u8 val)
{
@@ -94,7 +93,7 @@ static inline int __check_latch(u16 port, u8 val)
/*
* __wait_latch - Wait up to 100us for a port latch to get a certain value,
- * returning zero if the value is obtained. Callers must hold hdaps_mutex.
+ * returning zero if the value is obtained. Callers must hold hdaps_sem.
*/
static int __wait_latch(u16 port, u8 val)
{
@@ -111,7 +110,7 @@ static int __wait_latch(u16 port, u8 val)
/*
* __device_refresh - request a refresh from the accelerometer. Does not wait
- * for refresh to complete. Callers must hold hdaps_mutex.
+ * for refresh to complete. Callers must hold hdaps_sem.
*/
static void __device_refresh(void)
{
@@ -125,7 +124,7 @@ static void __device_refresh(void)
/*
* __device_refresh_sync - request a synchronous refresh from the
* accelerometer. We wait for the refresh to complete. Returns zero if
- * successful and nonzero on error. Callers must hold hdaps_mutex.
+ * successful and nonzero on error. Callers must hold hdaps_sem.
*/
static int __device_refresh_sync(void)
{
@@ -135,7 +134,7 @@ static int __device_refresh_sync(void)
/*
* __device_complete - indicate to the accelerometer that we are done reading
- * data, and then initiate an async refresh. Callers must hold hdaps_mutex.
+ * data, and then initiate an async refresh. Callers must hold hdaps_sem.
*/
static inline void __device_complete(void)
{
@@ -153,7 +152,7 @@ static int hdaps_readb_one(unsigned int port, u8 *val)
{
int ret;
- mutex_lock(&hdaps_mutex);
+ down(&hdaps_sem);
/* do a sync refresh -- we need to be sure that we read fresh data */
ret = __device_refresh_sync();
@@ -164,7 +163,7 @@ static int hdaps_readb_one(unsigned int port, u8 *val)
__device_complete();
out:
- mutex_unlock(&hdaps_mutex);
+ up(&hdaps_sem);
return ret;
}
@@ -199,9 +198,9 @@ static int hdaps_read_pair(unsigned int port1, unsigned int port2,
{
int ret;
- mutex_lock(&hdaps_mutex);
+ down(&hdaps_sem);
ret = __hdaps_read_pair(port1, port2, val1, val2);
- mutex_unlock(&hdaps_mutex);
+ up(&hdaps_sem);
return ret;
}
@@ -214,7 +213,7 @@ static int hdaps_device_init(void)
{
int total, ret = -ENXIO;
- mutex_lock(&hdaps_mutex);
+ down(&hdaps_sem);
outb(0x13, 0x1610);
outb(0x01, 0x161f);
@@ -280,7 +279,7 @@ static int hdaps_device_init(void)
}
out:
- mutex_unlock(&hdaps_mutex);
+ up(&hdaps_sem);
return ret;
}
@@ -314,7 +313,7 @@ static struct platform_driver hdaps_driver = {
};
/*
- * hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_mutex.
+ * hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_sem.
*/
static void hdaps_calibrate(void)
{
@@ -326,7 +325,7 @@ static void hdaps_mousedev_poll(unsigned long unused)
int x, y;
/* Cannot sleep. Try nonblockingly. If we fail, try again later. */
- if (!mutex_trylock(&hdaps_mutex)) {
+ if (down_trylock(&hdaps_sem)) {
mod_timer(&hdaps_timer,jiffies + HDAPS_POLL_PERIOD);
return;
}
@@ -341,7 +340,7 @@ static void hdaps_mousedev_poll(unsigned long unused)
mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD);
out:
- mutex_unlock(&hdaps_mutex);
+ up(&hdaps_sem);
}
@@ -421,9 +420,9 @@ static ssize_t hdaps_calibrate_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- mutex_lock(&hdaps_mutex);
+ down(&hdaps_sem);
hdaps_calibrate();
- mutex_unlock(&hdaps_mutex);
+ up(&hdaps_sem);
return count;
}
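
hdaps_mousedev_poll() runs from a timer and cannot block, so it takes the lock with down_trylock() and simply re-arms the timer when the lock is contended rather than waiting. A rough userspace analogue of that defer-and-retry shape, using a pthread mutex in place of the kernel semaphore (illustrative only, not part of the patch):

/* trylock_poll.c - if the lock is busy, skip this round and try again
 * on the next tick instead of blocking in a context that must not sleep. */
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pos_x, pos_y;		/* stands in for the accelerometer state */

static void poll_once(void)
{
	if (pthread_mutex_trylock(&lock) != 0) {
		/* contended: do nothing now, the next tick will retry */
		return;
	}
	printf("sampled (%d, %d)\n", pos_x, pos_y);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (int i = 0; i < 5; i++) {	/* stands in for the periodic timer */
		poll_once();
		usleep(50 * 1000);
	}
	return 0;
}
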
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index ccf528d733bfb..a5017de72da5c 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -61,6 +61,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/leds.h>
#define _IDE_DISK
@@ -317,6 +318,8 @@ static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, s
return ide_stopped;
}
+ ledtrig_ide_activity();
+
pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
(unsigned long long)block, rq->nr_sectors,
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 0606bd2f6020f..9233b8109a0f3 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -375,7 +375,13 @@ static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
}
}
- ide_end_request(drive, 1, rq->hard_nr_sectors);
+ if (rq->rq_disk) {
+ ide_driver_t *drv;
+
+ drv = *(ide_driver_t **)rq->rq_disk->private_data;;
+ drv->end_request(drive, 1, rq->hard_nr_sectors);
+ } else
+ ide_end_request(drive, 1, rq->hard_nr_sectors);
}
/*
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 6213bd3caee50..4961f1e764a75 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -81,14 +81,14 @@ static const char ide_major[] = {
};
typedef struct ide_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
int ndev;
dev_node_t node;
int hd;
} ide_info_t;
-static void ide_release(dev_link_t *);
-static void ide_config(dev_link_t *);
+static void ide_release(struct pcmcia_device *);
+static int ide_config(struct pcmcia_device *);
static void ide_detach(struct pcmcia_device *p_dev);
@@ -103,10 +103,9 @@ static void ide_detach(struct pcmcia_device *p_dev);
======================================================================*/
-static int ide_attach(struct pcmcia_device *p_dev)
+static int ide_probe(struct pcmcia_device *link)
{
ide_info_t *info;
- dev_link_t *link;
DEBUG(0, "ide_attach()\n");
@@ -114,7 +113,9 @@ static int ide_attach(struct pcmcia_device *p_dev)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- link = &info->link; link->priv = info;
+
+ info->p_dev = link;
+ link->priv = info;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
@@ -122,16 +123,9 @@ static int ide_attach(struct pcmcia_device *p_dev)
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- ide_config(link);
-
- return 0;
+ return ide_config(link);
} /* ide_attach */
/*======================================================================
@@ -143,14 +137,11 @@ static int ide_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void ide_detach(struct pcmcia_device *p_dev)
+static void ide_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
DEBUG(0, "ide_detach(0x%p)\n", link);
- if (link->state & DEV_CONFIG)
- ide_release(link);
+ ide_release(link);
kfree(link->priv);
} /* ide_detach */
@@ -177,9 +168,8 @@ static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void ide_config(dev_link_t *link)
+static int ide_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
ide_info_t *info = link->priv;
tuple_t tuple;
struct {
@@ -203,34 +193,30 @@ static void ide_config(dev_link_t *link)
tuple.TupleDataMax = 255;
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &stk->parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &stk->parse));
link->conf.ConfigBase = stk->parse.config.base;
link->conf.Present = stk->parse.config.rmask[0];
tuple.DesiredTuple = CISTPL_MANFID;
- if (!pcmcia_get_first_tuple(handle, &tuple) &&
- !pcmcia_get_tuple_data(handle, &tuple) &&
- !pcmcia_parse_tuple(handle, &tuple, &stk->parse))
+ if (!pcmcia_get_first_tuple(link, &tuple) &&
+ !pcmcia_get_tuple_data(link, &tuple) &&
+ !pcmcia_parse_tuple(link, &tuple, &stk->parse))
is_kme = ((stk->parse.manfid.manf == MANFID_KME) &&
((stk->parse.manfid.card == PRODID_KME_KXLC005_A) ||
(stk->parse.manfid.card == PRODID_KME_KXLC005_B)));
- /* Configure card */
- link->state |= DEV_CONFIG;
-
/* Not sure if this is right... look up the current Vcc */
- CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &stk->conf));
- link->conf.Vcc = stk->conf.Vcc;
+ CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &stk->conf));
pass = io_base = ctl_base = 0;
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
tuple.Attributes = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
- if (pcmcia_get_tuple_data(handle, &tuple) != 0) goto next_entry;
- if (pcmcia_parse_tuple(handle, &tuple, &stk->parse) != 0) goto next_entry;
+ if (pcmcia_get_tuple_data(link, &tuple) != 0) goto next_entry;
+ if (pcmcia_parse_tuple(link, &tuple, &stk->parse) != 0) goto next_entry;
/* Check for matching Vcc, unless we're desperate */
if (!pass) {
@@ -244,10 +230,10 @@ static void ide_config(dev_link_t *link)
}
if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
else if (stk->dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
stk->dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
if ((cfg->io.nwin > 0) || (stk->dflt.io.nwin > 0)) {
@@ -261,14 +247,14 @@ static void ide_config(dev_link_t *link)
link->io.NumPorts1 = 8;
link->io.BasePort2 = io->win[1].base;
link->io.NumPorts2 = (is_kme) ? 2 : 1;
- if (pcmcia_request_io(link->handle, &link->io) != 0)
+ if (pcmcia_request_io(link, &link->io) != 0)
goto next_entry;
io_base = link->io.BasePort1;
ctl_base = link->io.BasePort2;
} else if ((io->nwin == 1) && (io->win[0].len >= 16)) {
link->io.NumPorts1 = io->win[0].len;
link->io.NumPorts2 = 0;
- if (pcmcia_request_io(link->handle, &link->io) != 0)
+ if (pcmcia_request_io(link, &link->io) != 0)
goto next_entry;
io_base = link->io.BasePort1;
ctl_base = link->io.BasePort1 + 0x0e;
@@ -281,16 +267,16 @@ static void ide_config(dev_link_t *link)
if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
memcpy(&stk->dflt, cfg, sizeof(stk->dflt));
if (pass) {
- CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
- } else if (pcmcia_get_next_tuple(handle, &tuple) != 0) {
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
+ } else if (pcmcia_get_next_tuple(link, &tuple) != 0) {
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
memset(&stk->dflt, 0, sizeof(stk->dflt));
pass++;
}
}
- CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
/* disable drive interrupts during IDE probe */
outb(0x02, ctl_base);
@@ -301,12 +287,12 @@ static void ide_config(dev_link_t *link)
/* retry registration in case device is still spinning up */
for (hd = -1, i = 0; i < 10; i++) {
- hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, handle);
+ hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
if (hd >= 0) break;
if (link->io.NumPorts1 == 0x20) {
outb(0x02, ctl_base + 0x10);
hd = idecs_register(io_base + 0x10, ctl_base + 0x10,
- link->irq.AssignedIRQ, handle);
+ link->irq.AssignedIRQ, link);
if (hd >= 0) {
io_base += 0x10;
ctl_base += 0x10;
@@ -328,25 +314,23 @@ static void ide_config(dev_link_t *link)
info->node.major = ide_major[hd];
info->node.minor = 0;
info->hd = hd;
- link->dev = &info->node;
- printk(KERN_INFO "ide-cs: %s: Vcc = %d.%d, Vpp = %d.%d\n",
- info->node.dev_name, link->conf.Vcc / 10, link->conf.Vcc % 10,
- link->conf.Vpp1 / 10, link->conf.Vpp1 % 10);
+ link->dev_node = &info->node;
+ printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
+ info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
- link->state &= ~DEV_CONFIG_PENDING;
kfree(stk);
- return;
+ return 0;
err_mem:
printk(KERN_NOTICE "ide-cs: ide_config failed memory allocation\n");
goto failed;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
kfree(stk);
ide_release(link);
- link->state &= ~DEV_CONFIG_PENDING;
+ return -ENODEV;
} /* ide_config */
/*======================================================================
@@ -357,7 +341,7 @@ failed:
======================================================================*/
-void ide_release(dev_link_t *link)
+void ide_release(struct pcmcia_device *link)
{
ide_info_t *info = link->priv;
@@ -369,37 +353,10 @@ void ide_release(dev_link_t *link)
ide_unregister(info->hd);
}
info->ndev = 0;
- link->dev = NULL;
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
} /* ide_release */
-static int ide_suspend(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
-}
-
-static int ide_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state &= ~DEV_SUSPEND;
- if (DEV_OK(link))
- pcmcia_request_configuration(link->handle, &link->conf);
-
- return 0;
-}
/*======================================================================
@@ -459,11 +416,9 @@ static struct pcmcia_driver ide_cs_driver = {
.drv = {
.name = "ide-cs",
},
- .probe = ide_attach,
+ .probe = ide_probe,
.remove = ide_detach,
.id_table = ide_ids,
- .suspend = ide_suspend,
- .resume = ide_resume,
};
static int __init init_ide_cs(void)
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 2c765ca5aa506..f4206604db037 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -496,22 +496,17 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb(
/*
* This function finds the sbp2_command for a given outstanding SCpnt.
* Only looks at the inuse list.
+ * Must be called with scsi_id->sbp2_command_orb_lock held.
*/
-static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt)
+static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(
+ struct scsi_id_instance_data *scsi_id, void *SCpnt)
{
struct sbp2_command_info *command;
- unsigned long flags;
- spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
- if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
- list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
- if (command->Current_SCpnt == SCpnt) {
- spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
+ if (!list_empty(&scsi_id->sbp2_command_orb_inuse))
+ list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list)
+ if (command->Current_SCpnt == SCpnt)
return command;
- }
- }
- }
- spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return NULL;
}
@@ -580,17 +575,15 @@ static void sbp2util_free_command_dma(struct sbp2_command_info *command)
/*
* This function moves a command to the completed orb list.
+ * Must be called with scsi_id->sbp2_command_orb_lock held.
*/
-static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id,
- struct sbp2_command_info *command)
+static void sbp2util_mark_command_completed(
+ struct scsi_id_instance_data *scsi_id,
+ struct sbp2_command_info *command)
{
- unsigned long flags;
-
- spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
list_del(&command->list);
sbp2util_free_command_dma(command);
list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
- spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
}
/*
@@ -2148,7 +2141,9 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
* Matched status with command, now grab scsi command pointers and check status
*/
SCpnt = command->Current_SCpnt;
+ spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
sbp2util_mark_command_completed(scsi_id, command);
+ spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
if (SCpnt) {
@@ -2484,6 +2479,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
struct sbp2scsi_host_info *hi = scsi_id->hi;
struct sbp2_command_info *command;
+ unsigned long flags;
SBP2_ERR("aborting sbp2 command");
scsi_print_command(SCpnt);
@@ -2494,6 +2490,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
* Right now, just return any matching command structures
* to the free pool.
*/
+ spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
if (command) {
SBP2_DEBUG("Found command to abort");
@@ -2511,6 +2508,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
command->Current_done(command->Current_SCpnt);
}
}
+ spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
/*
* Initiate a fetch agent reset.
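
The sbp2 change moves the spinlock out of sbp2util_find_command_for_SCpnt() and sbp2util_mark_command_completed() and into their callers, so the lookup and the completion happen under one acquisition of sbp2_command_orb_lock instead of two separate ones. A small self-contained sketch of that caller-holds-lock convention, using pthreads and made-up types rather than the driver's (illustrative only):

/* caller_lock.c - find-then-complete under a single lock acquisition;
 * the helpers assume the caller already holds the lock. */
#include <stdio.h>
#include <pthread.h>

struct cmd { int id; int done; };

static pthread_mutex_t orb_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd inuse[4] = { {1, 0}, {2, 0}, {3, 0}, {4, 0} };

/* Must be called with orb_lock held. */
static struct cmd *find_cmd(int id)
{
	for (int i = 0; i < 4; i++)
		if (inuse[i].id == id)
			return &inuse[i];
	return NULL;
}

/* Must be called with orb_lock held. */
static void mark_completed(struct cmd *c)
{
	c->done = 1;
}

int main(void)
{
	pthread_mutex_lock(&orb_lock);	/* one acquisition covers find + complete */
	struct cmd *c = find_cmd(3);
	if (c)
		mark_completed(c);
	pthread_mutex_unlock(&orb_lock);

	printf("cmd 3 done = %d\n", inuse[2].done);
	return 0;
}
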
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index bdf0891a92dd5..afc612b8577dc 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -30,6 +30,7 @@ config INFINIBAND_USER_ACCESS
<http://www.openib.org>.
source "drivers/infiniband/hw/mthca/Kconfig"
+source "drivers/infiniband/hw/ipath/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index a43fb34cca94d..eea27322a22d6 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_INFINIBAND) += core/
obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
+obj-$(CONFIG_IPATH_CORE) += hw/ipath/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
new file mode 100644
index 0000000000000..9ea67c409b6d4
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -0,0 +1,16 @@
+config IPATH_CORE
+ tristate "PathScale InfiniPath Driver"
+ depends on 64BIT && PCI_MSI && NET
+ ---help---
+ This is a low-level driver for PathScale InfiniPath host channel
+ adapters (HCAs) based on the HT-400 and PE-800 chips.
+
+config INFINIBAND_IPATH
+ tristate "PathScale InfiniPath Verbs Driver"
+ depends on IPATH_CORE && INFINIBAND
+ ---help---
+ This is a driver that provides InfiniBand verbs support for
+ PathScale InfiniPath host channel adapters (HCAs). This
+ allows these devices to be used with both kernel upper level
+ protocols such as IP-over-InfiniBand as well as with userspace
+ applications (in conjunction with InfiniBand userspace access).
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
new file mode 100644
index 0000000000000..b4d084abfd22c
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -0,0 +1,36 @@
+EXTRA_CFLAGS += -DIPATH_IDSTR='"PathScale kernel.org driver"' \
+ -DIPATH_KERN_TYPE=0
+
+obj-$(CONFIG_IPATH_CORE) += ipath_core.o
+obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
+
+ipath_core-y := \
+ ipath_diag.o \
+ ipath_driver.o \
+ ipath_eeprom.o \
+ ipath_file_ops.o \
+ ipath_fs.o \
+ ipath_ht400.o \
+ ipath_init_chip.o \
+ ipath_intr.o \
+ ipath_layer.o \
+ ipath_pe800.o \
+ ipath_stats.o \
+ ipath_sysfs.o \
+ ipath_user_pages.o
+
+ipath_core-$(CONFIG_X86_64) += ipath_wc_x86_64.o
+
+ib_ipath-y := \
+ ipath_cq.o \
+ ipath_keys.o \
+ ipath_mad.o \
+ ipath_mr.o \
+ ipath_qp.o \
+ ipath_rc.o \
+ ipath_ruc.o \
+ ipath_srq.o \
+ ipath_uc.o \
+ ipath_ud.o \
+ ipath_verbs.o \
+ ipath_verbs_mcast.o
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
new file mode 100644
index 0000000000000..48a55247b832d
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -0,0 +1,616 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _IPATH_COMMON_H
+#define _IPATH_COMMON_H
+
+/*
+ * This file contains defines, structures, etc. that are used
+ * to communicate between kernel and user code.
+ */
+
+/* This is the IEEE-assigned OUI for PathScale, Inc. */
+#define IPATH_SRC_OUI_1 0x00
+#define IPATH_SRC_OUI_2 0x11
+#define IPATH_SRC_OUI_3 0x75
+
+/* version of protocol header (known to chip also). In the long run,
+ * we should be able to generate and accept a range of version numbers;
+ * for now we only accept one, and it's compiled in.
+ */
+#define IPS_PROTO_VERSION 2
+
+/*
+ * These are compile time constants that you may want to enable or disable
+ * if you are trying to debug problems with code or performance.
+ * IPATH_VERBOSE_TRACING define as 1 if you want additional tracing in
+ * fastpath code
+ * IPATH_TRACE_REGWRITES define as 1 if you want register writes to be
+ * traced in faspath code
+ * _IPATH_TRACING define as 0 if you want to remove all tracing in a
+ * compilation unit
+ * _IPATH_DEBUGGING define as 0 if you want to remove debug prints
+ */
+
+/*
+ * The value in the BTH QP field that InfiniPath uses to differentiate
+ * an infinipath protocol IB packet vs standard IB transport
+ */
+#define IPATH_KD_QP 0x656b79
+
+/*
+ * valid states passed to ipath_set_linkstate() user call
+ */
+#define IPATH_IB_LINKDOWN 0
+#define IPATH_IB_LINKARM 1
+#define IPATH_IB_LINKACTIVE 2
+#define IPATH_IB_LINKINIT 3
+#define IPATH_IB_LINKDOWN_SLEEP 4
+#define IPATH_IB_LINKDOWN_DISABLE 5
+
+/*
+ * stats maintained by the driver. For now, at least, this is global
+ * to all minor devices.
+ */
+struct infinipath_stats {
+ /* number of interrupts taken */
+ __u64 sps_ints;
+ /* number of interrupts for errors */
+ __u64 sps_errints;
+ /* number of errors from chip (not incl. packet errors or CRC) */
+ __u64 sps_errs;
+ /* number of packet errors from chip other than CRC */
+ __u64 sps_pkterrs;
+ /* number of packets with CRC errors (ICRC and VCRC) */
+ __u64 sps_crcerrs;
+ /* number of hardware errors reported (parity, etc.) */
+ __u64 sps_hwerrs;
+ /* number of times IB link changed state unexpectedly */
+ __u64 sps_iblink;
+ /* no longer used; left for compatibility */
+ __u64 sps_unused3;
+ /* number of kernel (port0) packets received */
+ __u64 sps_port0pkts;
+ /* number of "ethernet" packets sent by driver */
+ __u64 sps_ether_spkts;
+ /* number of "ethernet" packets received by driver */
+ __u64 sps_ether_rpkts;
+ /* number of SMA packets sent by driver */
+ __u64 sps_sma_spkts;
+ /* number of SMA packets received by driver */
+ __u64 sps_sma_rpkts;
+ /* number of times all ports rcvhdrq was full and packet dropped */
+ __u64 sps_hdrqfull;
+ /* number of times all ports egrtid was full and packet dropped */
+ __u64 sps_etidfull;
+ /*
+ * number of times we tried to send from driver, but no pio buffers
+ * avail
+ */
+ __u64 sps_nopiobufs;
+ /* number of ports currently open */
+ __u64 sps_ports;
+ /* list of pkeys (other than default) accepted (0 means not set) */
+ __u16 sps_pkeys[4];
+ /* lids for up to 4 infinipaths, indexed by infinipath # */
+ __u16 sps_lid[4];
+ /* number of user ports per chip (not IB ports) */
+ __u32 sps_nports;
+ /* not our interrupt, or already handled */
+ __u32 sps_nullintr;
+ /* max number of packets handled per receive call */
+ __u32 sps_maxpkts_call;
+ /* avg number of packets handled per receive call */
+ __u32 sps_avgpkts_call;
+ /* total number of pages locked */
+ __u64 sps_pagelocks;
+ /* total number of pages unlocked */
+ __u64 sps_pageunlocks;
+ /*
+ * Number of packets dropped in kernel other than errors (ether
+ * packets if ipath not configured, sma/mad, etc.)
+ */
+ __u64 sps_krdrops;
+ /* mlids for up to 4 infinipaths, indexed by infinipath # */
+ __u16 sps_mlid[4];
+ /* pad for future growth */
+ __u64 __sps_pad[45];
+};
+
+/*
+ * These are the status bits readable (in ASCII form, 64-bit value)
+ * from the "status" sysfs file.
+ */
+#define IPATH_STATUS_INITTED 0x1 /* basic initialization done */
+#define IPATH_STATUS_DISABLED 0x2 /* hardware disabled */
+/* Device has been disabled via admin request */
+#define IPATH_STATUS_ADMIN_DISABLED 0x4
+#define IPATH_STATUS_OIB_SMA 0x8 /* ipath_mad kernel SMA running */
+#define IPATH_STATUS_SMA 0x10 /* user SMA running */
+/* Chip has been found and initted */
+#define IPATH_STATUS_CHIP_PRESENT 0x20
+/* IB link is at ACTIVE, usable for data traffic */
+#define IPATH_STATUS_IB_READY 0x40
+/* link is configured, LID, MTU, etc. have been set */
+#define IPATH_STATUS_IB_CONF 0x80
+/* no link established, probably no cable */
+#define IPATH_STATUS_IB_NOCABLE 0x100
+/* A Fatal hardware error has occurred. */
+#define IPATH_STATUS_HWERROR 0x200
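The status word above is exported as text; a minimal userspace sketch of testing the IB-ready bit, where the sysfs path and the hexadecimal formatting of the value are assumptions (only the attribute name "status" and the bit definitions come from this header):

/*
 * Sketch: parse the ASCII status value and test IPATH_STATUS_IB_READY.
 * The sysfs path passed in and the hex formatting are assumptions.
 */
#include <stdio.h>

#include "ipath_common.h"

static int ipath_ib_ready(const char *status_path)
{
	unsigned long long status = 0;
	FILE *f = fopen(status_path, "r");

	if (!f)
		return -1;
	if (fscanf(f, "%llx", &status) != 1)
		status = 0;
	fclose(f);
	return (status & IPATH_STATUS_IB_READY) ? 1 : 0;
}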
+
+/*
+ * The list of usermode accessible registers. Also see Reg_* later in file.
+ */
+typedef enum _ipath_ureg {
+ /* (RO) DMA RcvHdr to be used next. */
+ ur_rcvhdrtail = 0,
+ /* (RW) RcvHdr entry to be processed next by host. */
+ ur_rcvhdrhead = 1,
+ /* (RO) Index of next Eager index to use. */
+ ur_rcvegrindextail = 2,
+ /* (RW) Eager TID to be processed next */
+ ur_rcvegrindexhead = 3,
+ /* For internal use only; max register number. */
+ _IPATH_UregMax
+} ipath_ureg;
+
+/* bit values for spi_runtime_flags */
+#define IPATH_RUNTIME_HT 0x1
+#define IPATH_RUNTIME_PCIE 0x2
+#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
+#define IPATH_RUNTIME_RCVHDR_COPY 0x8
+
+/*
+ * This structure is returned by ipath_userinit() immediately after
+ * open to get implementation-specific info, and info specific to this
+ * instance.
+ *
+ * This struct must have explicit pad fields where type sizes
+ * may result in different alignments between 32 and 64 bit
+ * programs, since the 64 bit kernel requires the user code
+ * to have matching offsets
+ */
+struct ipath_base_info {
+ /* version of hardware, for feature checking. */
+ __u32 spi_hw_version;
+ /* version of software, for feature checking. */
+ __u32 spi_sw_version;
+ /* InfiniPath port assigned, goes into sent packets */
+ __u32 spi_port;
+ /*
+ * IB MTU, packets IB data must be less than this.
+ * The MTU is in bytes, and will be a multiple of 4 bytes.
+ */
+ __u32 spi_mtu;
+ /*
+ * Size of a PIO buffer. Any given packet's total size must be less
+ * than this (in words). Included is the starting control word, so
+ * if 513 is returned, then total pkt size is 512 words or less.
+ */
+ __u32 spi_piosize;
+ /* size of the TID cache in infinipath, in entries */
+ __u32 spi_tidcnt;
+ /* size of the TID Eager list in infinipath, in entries */
+ __u32 spi_tidegrcnt;
+ /* size of a single receive header queue entry. */
+ __u32 spi_rcvhdrent_size;
+ /*
+ * Count of receive header queue entries allocated.
+ * This may be less than the spu_rcvhdrcnt passed in!
+ */
+ __u32 spi_rcvhdr_cnt;
+
+ /* per-chip and other runtime features bitmap (IPATH_RUNTIME_*) */
+ __u32 spi_runtime_flags;
+
+ /* address where receive buffer queue is mapped into user program */
+ __u64 spi_rcvhdr_base;
+
+ /*
+ * base address of eager TID receive buffers; allocated by
+ * initialization code, not by protocol.
+ */
+ __u64 spi_rcv_egrbufs;
+
+ /*
+ * Size of each TID buffer in host memory, starting at
+ * spi_rcv_egrbufs. The buffers are virtually contiguous.
+ */
+ __u32 spi_rcv_egrbufsize;
+ /*
+ * The special QP (queue pair) value that identifies an infinipath
+ * protocol packet from standard IB packets. More, probably much
+ * more, to be added.
+ */
+ __u32 spi_qpair;
+
+ /*
+ * User register base for init code, not to be used directly by
+ * protocol or applications.
+ */
+ __u64 __spi_uregbase;
+ /*
+ * Maximum buffer size in bytes that can be used in a single TID
+ * entry (assuming the buffer is aligned to this boundary). This is
+ * the minimum of what the hardware and software support. Guaranteed
+ * to be a power of 2.
+ */
+ __u32 spi_tid_maxsize;
+ /*
+ * alignment of each pio send buffer (byte count
+ * to add to spi_piobufbase to get to second buffer)
+ */
+ __u32 spi_pioalign;
+ /*
+ * The index of the first pio buffer available to this process;
+ * needed to do lookup in spi_pioavailaddr; not added to
+ * spi_piobufbase.
+ */
+ __u32 spi_pioindex;
+ /* number of buffers mapped for this process */
+ __u32 spi_piocnt;
+
+ /*
+ * Base address of writeonly pio buffers for this process.
+ * Each buffer has spi_piosize words, and is aligned on spi_pioalign
+ * boundaries. spi_piocnt buffers are mapped from this address
+ */
+ __u64 spi_piobufbase;
+
+ /*
+ * Base address of readonly memory copy of the pioavail registers.
+ * There are 2 bits for each buffer.
+ */
+ __u64 spi_pioavailaddr;
+
+ /*
+ * Address where driver updates a copy of the interface and driver
+ * status (IPATH_STATUS_*) as a 64 bit value. It's followed by a
+ * string indicating hardware error, if there was one.
+ */
+ __u64 spi_status;
+
+ /* number of chip ports available to user processes */
+ __u32 spi_nports;
+ /* unit number of chip we are using */
+ __u32 spi_unit;
+ /* num bufs in each contiguous set */
+ __u32 spi_rcv_egrperchunk;
+ /* size in bytes of each contiguous set */
+ __u32 spi_rcv_egrchunksize;
+ /* total size of mmap to cover full rcvegrbuffers */
+ __u32 spi_rcv_egrbuftotlen;
+} __attribute__ ((aligned(8)));
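Taken together, spi_piobufbase, spi_pioalign, spi_piocnt and spi_piosize let user code locate and bound an individual send buffer once the region is mapped. A minimal sketch, assuming pio_base is the user-space mapping that corresponds to spi_piobufbase:

/*
 * Sketch: address of the i-th PIO send buffer for this process.
 * pio_base is assumed to be the user mapping of spi_piobufbase; each
 * buffer begins spi_pioalign bytes after the previous one, and the
 * caller must keep i < spi_piocnt.
 */
static inline void *ipath_pio_buf(void *pio_base,
				  const struct ipath_base_info *bi,
				  unsigned i)
{
	return (char *)pio_base + (unsigned long)i * bi->spi_pioalign;
}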
+
+
+/*
+ * This version number is given to the driver by the user code during
+ * initialization in the spu_userversion field of ipath_user_info, so
+ * the driver can check for compatibility with user code.
+ *
+ * The major version changes when data structures
+ * change in an incompatible way. The driver must be the same or higher
+ * for initialization to succeed. In some cases, a higher version
+ * driver will not interoperate with older software, and initialization
+ * will return an error.
+ */
+#define IPATH_USER_SWMAJOR 1
+
+/*
+ * Minor version differences are always compatible
+ * within a major version; however, if the user software is newer
+ * than the driver software, some new features and/or structure fields
+ * may not be implemented; the user code must deal with this if it
+ * cares, or it must abort after initialization reports the difference.
+ */
+#define IPATH_USER_SWMINOR 2
+
+#define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR)
+
+#define IPATH_KERN_TYPE 0
+
+/*
+ * Similarly, this is the kernel version going back to the user. It's
+ * slightly different, in that we want to tell if the driver was built as
+ * part of a PathScale release, or from the driver from OpenIB, kernel.org,
+ * or a standard distribution, for support reasons. The high bit is 0 for
+ * non-PathScale, and 1 for PathScale-built/supplied.
+ *
+ * It's returned by the driver to the user code during initialization in the
+ * spi_sw_version field of ipath_base_info, so the user code can in turn
+ * check for compatibility with the kernel.
+*/
+#define IPATH_KERN_SWVERSION ((IPATH_KERN_TYPE<<31) | IPATH_USER_SWVERSION)
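A user-side compatibility check then reduces to unpacking the two packed halves. A sketch follows; treating a major-version mismatch as fatal is an assumption, since the comments above only require the driver to be the same or newer:

/*
 * Sketch: compare the packed software versions. Minor differences are
 * compatible per the comments above; rejecting any major mismatch is
 * an assumption about policy.
 */
static int ipath_version_ok(__u32 spi_sw_version)
{
	__u32 kern = spi_sw_version & ~(1U << 31);	/* strip the PathScale bit */

	return (kern >> 16) == (IPATH_USER_SWVERSION >> 16);
}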
+
+/*
+ * This structure is passed to ipath_userinit() to tell the driver where
+ * user code buffers are, sizes, etc. The offsets and sizes of the
+ * fields must remain unchanged, for binary compatibility. It can
+ * be extended, if userversion is changed so user code can tell, if needed.
+ */
+struct ipath_user_info {
+ /*
+ * version of user software, to detect compatibility issues.
+ * Should be set to IPATH_USER_SWVERSION.
+ */
+ __u32 spu_userversion;
+
+ /* desired number of receive header queue entries */
+ __u32 spu_rcvhdrcnt;
+
+ /* size of struct base_info to write to */
+ __u32 spu_base_info_size;
+
+ /*
+ * number of words in KD protocol header
+ * This tells InfiniPath how many words to copy to rcvhdrq. If 0,
+ * kernel uses a default. Once set, attempts to set any other value
+ * are an error (EAGAIN) until driver is reloaded.
+ */
+ __u32 spu_rcvhdrsize;
+
+ /*
+ * cache line aligned (64 byte) user address to
+ * which the rcvhdrtail register will be written by infinipath
+ * whenever it changes, so that no chip registers are read in
+ * the performance path.
+ */
+ __u64 spu_rcvhdraddr;
+
+ /*
+ * address of struct base_info to write to
+ */
+ __u64 spu_base_info;
+
+} __attribute__ ((aligned(8)));
+
+/* User commands. */
+
+#define IPATH_CMD_MIN 16
+
+#define IPATH_CMD_USER_INIT 16 /* set up userspace */
+#define IPATH_CMD_PORT_INFO 17 /* find out what resources we got */
+#define IPATH_CMD_RECV_CTRL 18 /* control receipt of packets */
+#define IPATH_CMD_TID_UPDATE 19 /* update expected TID entries */
+#define IPATH_CMD_TID_FREE 20 /* free expected TID entries */
+#define IPATH_CMD_SET_PART_KEY 21 /* add partition key */
+
+#define IPATH_CMD_MAX 21
+
+struct ipath_port_info {
+ __u32 num_active; /* number of active units */
+ __u32 unit; /* unit (chip) assigned to caller */
+ __u32 port; /* port on unit assigned to caller */
+};
+
+struct ipath_tid_info {
+ __u32 tidcnt;
+ /* make structure same size in 32 and 64 bit */
+ __u32 tid__unused;
+ /* virtual address of first page in transfer */
+ __u64 tidvaddr;
+ /* pointer (same size 32/64 bit) to __u16 tid array */
+ __u64 tidlist;
+
+ /*
+ * pointer (same size 32/64 bit) to bitmap of TIDs used
+ * for this call; checked for being large enough at open
+ */
+ __u64 tidmap;
+};
+
+struct ipath_cmd {
+ __u32 type; /* command type */
+ union {
+ struct ipath_tid_info tid_info;
+ struct ipath_user_info user_info;
+ /* address in userspace of struct ipath_port_info to
+ write result to */
+ __u64 port_info;
+ /* enable/disable receipt of packets */
+ __u32 recv_ctrl;
+ /* partition key to set */
+ __u16 part_key;
+ } cmd;
+};
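Commands reach the driver wrapped in struct ipath_cmd. A hedged sketch of user initialization follows; the structure layout and command value come from this header, while delivery via write() on an already-open device descriptor is an assumption:

/*
 * Sketch: issue IPATH_CMD_USER_INIT. Delivery via write() is an
 * assumption; the field names come from ipath_user_info above.
 */
#include <string.h>
#include <unistd.h>

#include "ipath_common.h"

static int ipath_user_init(int fd, struct ipath_base_info *bi,
			   __u64 rcvhdr_tail_addr)
{
	struct ipath_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = IPATH_CMD_USER_INIT;
	cmd.cmd.user_info.spu_userversion = IPATH_USER_SWVERSION;
	cmd.cmd.user_info.spu_rcvhdrcnt = 0;	/* 0: let driver choose (assumption) */
	cmd.cmd.user_info.spu_rcvhdrsize = 0;	/* 0: kernel default, per the comment above */
	cmd.cmd.user_info.spu_rcvhdraddr = rcvhdr_tail_addr;
	cmd.cmd.user_info.spu_base_info_size = sizeof(*bi);
	cmd.cmd.user_info.spu_base_info = (__u64)(unsigned long)bi;

	return write(fd, &cmd, sizeof(cmd)) == sizeof(cmd) ? 0 : -1;
}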
+
+struct ipath_iovec {
+ /* Pointer to data, but same size 32 and 64 bit */
+ __u64 iov_base;
+
+ /*
+ * Length of data; don't need 64 bits, but want
+ * ipath_sendpkt to remain same size as before 32 bit changes, so...
+ */
+ __u64 iov_len;
+};
+
+/*
+ * Describes a single packet for send. Each packet can have one or more
+ * buffers, but the total length (exclusive of IB headers) must be less
+ * than the MTU, and if using the PIO method, entire packet length,
+ * including IB headers, must be less than the ipath_piosize value (words).
+ * Use of this necessitates including sys/uio.h
+ */
+struct __ipath_sendpkt {
+ __u32 sps_flags; /* flags for packet (TBD) */
+ __u32 sps_cnt; /* number of entries to use in sps_iov */
+ /* array of iov's describing packet. TEMPORARY */
+ struct ipath_iovec sps_iov[4];
+};
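Only the structure layout above is needed to describe a packet; a sketch of filling it for a header-plus-payload send (how the filled structure is handed to the driver is not shown here):

/* Sketch: describe a two-buffer (header + payload) packet for send. */
static void ipath_fill_sendpkt(struct __ipath_sendpkt *pkt,
			       void *hdr, __u64 hdrlen,
			       void *payload, __u64 paylen)
{
	pkt->sps_flags = 0;
	pkt->sps_cnt = 2;
	pkt->sps_iov[0].iov_base = (__u64)(unsigned long)hdr;
	pkt->sps_iov[0].iov_len = hdrlen;
	pkt->sps_iov[1].iov_base = (__u64)(unsigned long)payload;
	pkt->sps_iov[1].iov_len = paylen;
}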
+
+/* Passed into SMA special file's ->read and ->write methods. */
+struct ipath_sma_pkt
+{
+ __u32 unit; /* unit on which to send packet */
+ __u64 data; /* address of payload in userspace */
+ __u32 len; /* length of payload */
+};
+
+/*
+ * Data layout in I2C flash (for GUID, etc.)
+ * All fields are little-endian binary unless otherwise stated
+ */
+#define IPATH_FLASH_VERSION 1
+struct ipath_flash {
+ /* flash layout version (IPATH_FLASH_VERSION) */
+ __u8 if_fversion;
+ /* checksum protecting if_length bytes */
+ __u8 if_csum;
+ /*
+ * valid length (in use, protected by if_csum), including
+ * if_fversion and if_csum themselves
+ */
+ __u8 if_length;
+ /* the GUID, in network order */
+ __u8 if_guid[8];
+ /* number of GUIDs to use, starting from if_guid */
+ __u8 if_numguid;
+ /* the board serial number, in ASCII */
+ char if_serial[12];
+ /* board mfg date (YYYYMMDD ASCII) */
+ char if_mfgdate[8];
+ /* last board rework/test date (YYYYMMDD ASCII) */
+ char if_testdate[8];
+ /* logging of error counts, TBD */
+ __u8 if_errcntp[4];
+ /* powered on hours, updated at driver unload */
+ __u8 if_powerhour[2];
+ /* ASCII free-form comment field */
+ char if_comment[32];
+ /* 78 bytes used, min flash size is 128 bytes */
+ __u8 if_future[50];
+};
+
+/*
+ * These are the counters implemented in the chip, and are listed in order.
+ * The InterCaps naming is taken straight from the chip spec.
+ */
+struct infinipath_counters {
+ __u64 LBIntCnt;
+ __u64 LBFlowStallCnt;
+ __u64 Reserved1;
+ __u64 TxUnsupVLErrCnt;
+ __u64 TxDataPktCnt;
+ __u64 TxFlowPktCnt;
+ __u64 TxDwordCnt;
+ __u64 TxLenErrCnt;
+ __u64 TxMaxMinLenErrCnt;
+ __u64 TxUnderrunCnt;
+ __u64 TxFlowStallCnt;
+ __u64 TxDroppedPktCnt;
+ __u64 RxDroppedPktCnt;
+ __u64 RxDataPktCnt;
+ __u64 RxFlowPktCnt;
+ __u64 RxDwordCnt;
+ __u64 RxLenErrCnt;
+ __u64 RxMaxMinLenErrCnt;
+ __u64 RxICRCErrCnt;
+ __u64 RxVCRCErrCnt;
+ __u64 RxFlowCtrlErrCnt;
+ __u64 RxBadFormatCnt;
+ __u64 RxLinkProblemCnt;
+ __u64 RxEBPCnt;
+ __u64 RxLPCRCErrCnt;
+ __u64 RxBufOvflCnt;
+ __u64 RxTIDFullErrCnt;
+ __u64 RxTIDValidErrCnt;
+ __u64 RxPKeyMismatchCnt;
+ __u64 RxP0HdrEgrOvflCnt;
+ __u64 RxP1HdrEgrOvflCnt;
+ __u64 RxP2HdrEgrOvflCnt;
+ __u64 RxP3HdrEgrOvflCnt;
+ __u64 RxP4HdrEgrOvflCnt;
+ __u64 RxP5HdrEgrOvflCnt;
+ __u64 RxP6HdrEgrOvflCnt;
+ __u64 RxP7HdrEgrOvflCnt;
+ __u64 RxP8HdrEgrOvflCnt;
+ __u64 Reserved6;
+ __u64 Reserved7;
+ __u64 IBStatusChangeCnt;
+ __u64 IBLinkErrRecoveryCnt;
+ __u64 IBLinkDownedCnt;
+ __u64 IBSymbolErrCnt;
+};
+
+/*
+ * The next set of defines are for packet headers, and chip register
+ * and memory bits that are visible to and/or used by user-mode software
+ * The other bits that are used only by the driver or diags are in
+ * ipath_registers.h
+ */
+
+/* RcvHdrFlags bits */
+#define INFINIPATH_RHF_LENGTH_MASK 0x7FF
+#define INFINIPATH_RHF_LENGTH_SHIFT 0
+#define INFINIPATH_RHF_RCVTYPE_MASK 0x7
+#define INFINIPATH_RHF_RCVTYPE_SHIFT 11
+#define INFINIPATH_RHF_EGRINDEX_MASK 0x7FF
+#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
+#define INFINIPATH_RHF_H_ICRCERR 0x80000000
+#define INFINIPATH_RHF_H_VCRCERR 0x40000000
+#define INFINIPATH_RHF_H_PARITYERR 0x20000000
+#define INFINIPATH_RHF_H_LENERR 0x10000000
+#define INFINIPATH_RHF_H_MTUERR 0x08000000
+#define INFINIPATH_RHF_H_IHDRERR 0x04000000
+#define INFINIPATH_RHF_H_TIDERR 0x02000000
+#define INFINIPATH_RHF_H_MKERR 0x01000000
+#define INFINIPATH_RHF_H_IBERR 0x00800000
+#define INFINIPATH_RHF_L_SWA 0x00008000
+#define INFINIPATH_RHF_L_SWB 0x00004000
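Each field in the receive header flags word is recovered with the usual shift-and-mask pattern, for example:

/* Sketch: shift-and-mask extraction of RcvHdrFlags fields. */
static inline __u32 ipath_rhf_rcv_type(__u32 rhf)
{
	return (rhf >> INFINIPATH_RHF_RCVTYPE_SHIFT) &
		INFINIPATH_RHF_RCVTYPE_MASK;
}

static inline __u32 ipath_rhf_egr_index(__u32 rhf)
{
	return (rhf >> INFINIPATH_RHF_EGRINDEX_SHIFT) &
		INFINIPATH_RHF_EGRINDEX_MASK;
}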
+
+/* infinipath header fields */
+#define INFINIPATH_I_VERS_MASK 0xF
+#define INFINIPATH_I_VERS_SHIFT 28
+#define INFINIPATH_I_PORT_MASK 0xF
+#define INFINIPATH_I_PORT_SHIFT 24
+#define INFINIPATH_I_TID_MASK 0x7FF
+#define INFINIPATH_I_TID_SHIFT 13
+#define INFINIPATH_I_OFFSET_MASK 0x1FFF
+#define INFINIPATH_I_OFFSET_SHIFT 0
+
+/* K_PktFlags bits */
+#define INFINIPATH_KPF_INTR 0x1
+
+/* SendPIO per-buffer control */
+#define INFINIPATH_SP_LENGTHP1_MASK 0x3FF
+#define INFINIPATH_SP_LENGTHP1_SHIFT 0
+#define INFINIPATH_SP_INTR 0x80000000
+#define INFINIPATH_SP_TEST 0x40000000
+#define INFINIPATH_SP_TESTEBP 0x20000000
+
+/* SendPIOAvail bits */
+#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
+#define INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT 0
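The pioavail copy described in ipath_base_info keeps two bits per buffer; a sketch of testing the busy bit of buffer i follows, where packing 32 buffers per little-endian 64-bit word is an assumption:

/*
 * Sketch: test the "busy" bit of PIO buffer i in the pioavail shadow.
 * Two bits per buffer comes from the spi_pioavailaddr comment; the
 * 32-buffers-per-__le64 packing is an assumption.
 */
static inline int ipath_piobuf_busy(const __le64 *pioavail, unsigned i)
{
	__u64 w = le64_to_cpu(pioavail[i / 32]);

	return (w >> (2 * (i % 32) + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT)) & 1;
}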
+
+#endif /* _IPATH_COMMON_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
new file mode 100644
index 0000000000000..7ece1135ddfe8
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+
+#include "ipath_verbs.h"
+
+/**
+ * ipath_cq_enter - add a new entry to the completion queue
+ * @cq: completion queue
+ * @entry: work completion entry to add
+ * @solicited: true if @entry is a solicited entry
+ *
+ * This may be called with one of the qp->s_lock or qp->r_rq.lock held.
+ */
+void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
+{
+ unsigned long flags;
+ u32 next;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ if (cq->head == cq->ibcq.cqe)
+ next = 0;
+ else
+ next = cq->head + 1;
+ if (unlikely(next == cq->tail)) {
+ spin_unlock_irqrestore(&cq->lock, flags);
+ if (cq->ibcq.event_handler) {
+ struct ib_event ev;
+
+ ev.device = cq->ibcq.device;
+ ev.element.cq = &cq->ibcq;
+ ev.event = IB_EVENT_CQ_ERR;
+ cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
+ }
+ return;
+ }
+ cq->queue[cq->head] = *entry;
+ cq->head = next;
+
+ if (cq->notify == IB_CQ_NEXT_COMP ||
+ (cq->notify == IB_CQ_SOLICITED && solicited)) {
+ cq->notify = IB_CQ_NONE;
+ cq->triggered++;
+ /*
+ * This will cause send_complete() to be called in
+ * another thread.
+ */
+ tasklet_hi_schedule(&cq->comptask);
+ }
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ if (entry->status != IB_WC_SUCCESS)
+ to_idev(cq->ibcq.device)->n_wqe_errs++;
+}
+
+/**
+ * ipath_poll_cq - poll for work completion entries
+ * @ibcq: the completion queue to poll
+ * @num_entries: the maximum number of entries to return
+ * @entry: pointer to array where work completions are placed
+ *
+ * Returns the number of completion entries polled.
+ *
+ * This may be called from interrupt context. Also called by ib_poll_cq()
+ * in the generic verbs code.
+ */
+int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+{
+ struct ipath_cq *cq = to_icq(ibcq);
+ unsigned long flags;
+ int npolled;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
+ if (cq->tail == cq->head)
+ break;
+ *entry = cq->queue[cq->tail];
+ if (cq->tail == cq->ibcq.cqe)
+ cq->tail = 0;
+ else
+ cq->tail++;
+ }
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return npolled;
+}
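On the consumer side this is reached through the generic verbs entry points; a typical (not ipath-specific) drain-and-rearm pattern looks like the sketch below. A production consumer would poll once more after re-arming to close the race with completions that arrive before the request takes effect.

/*
 * Sketch: generic verbs-consumer completion handling; nothing here is
 * ipath-specific (see <rdma/ib_verbs.h>).
 */
static void drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			printk(KERN_ERR "completion error %d\n", wc.status);
		/* ... handle the completion ... */
	}
	/* ask to be notified when the next completion is added */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}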
+
+static void send_complete(unsigned long data)
+{
+ struct ipath_cq *cq = (struct ipath_cq *)data;
+
+ /*
+ * The completion handler will most likely rearm the notification
+ * and poll for all pending entries. If a new completion entry
+ * is added while we are in this routine, tasklet_hi_schedule()
+ * won't call us again until we return so we check triggered to
+ * see if we need to call the handler again.
+ */
+ for (;;) {
+ u8 triggered = cq->triggered;
+
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+
+ if (cq->triggered == triggered)
+ return;
+ }
+}
+
+/**
+ * ipath_create_cq - create a completion queue
+ * @ibdev: the device this completion queue is attached to
+ * @entries: the minimum size of the completion queue
+ * @context: unused by the InfiniPath driver
+ * @udata: unused by the InfiniPath driver
+ *
+ * Returns a pointer to the completion queue or negative errno values
+ * for failure.
+ *
+ * Called by ib_create_cq() in the generic verbs code.
+ */
+struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct ipath_cq *cq;
+ struct ib_wc *wc;
+ struct ib_cq *ret;
+
+ /* Allocate the completion queue structure. */
+ cq = kmalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ /*
+ * Need to use vmalloc() if we want to support large #s of entries.
+ */
+ wc = vmalloc(sizeof(*wc) * (entries + 1));
+ if (!wc) {
+ kfree(cq);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+ /*
+ * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
+ * The number of entries reported should be >= the number requested,
+ * or an error should be returned.
+ */
+ cq->ibcq.cqe = entries;
+ cq->notify = IB_CQ_NONE;
+ cq->triggered = 0;
+ spin_lock_init(&cq->lock);
+ tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
+ cq->head = 0;
+ cq->tail = 0;
+ cq->queue = wc;
+
+ ret = &cq->ibcq;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_destroy_cq - destroy a completion queue
+ * @ibcq: the completion queue to destroy.
+ *
+ * Returns 0 for success.
+ *
+ * Called by ib_destroy_cq() in the generic verbs code.
+ */
+int ipath_destroy_cq(struct ib_cq *ibcq)
+{
+ struct ipath_cq *cq = to_icq(ibcq);
+
+ tasklet_kill(&cq->comptask);
+ vfree(cq->queue);
+ kfree(cq);
+
+ return 0;
+}
+
+/**
+ * ipath_req_notify_cq - change the notification type for a completion queue
+ * @ibcq: the completion queue
+ * @notify: the type of notification to request
+ *
+ * Returns 0 for success.
+ *
+ * This may be called from interrupt context. Also called by
+ * ib_req_notify_cq() in the generic verbs code.
+ */
+int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+{
+ struct ipath_cq *cq = to_icq(ibcq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->lock, flags);
+ /*
+ * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
+ * any other transitions.
+ */
+ if (cq->notify != IB_CQ_NEXT_COMP)
+ cq->notify = notify;
+ spin_unlock_irqrestore(&cq->lock, flags);
+ return 0;
+}
+
+int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+ struct ipath_cq *cq = to_icq(ibcq);
+ struct ib_wc *wc, *old_wc;
+ u32 n;
+ int ret;
+
+ /*
+ * Need to use vmalloc() if we want to support large #s of entries.
+ */
+ wc = vmalloc(sizeof(*wc) * (cqe + 1));
+ if (!wc) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ spin_lock_irq(&cq->lock);
+ if (cq->head < cq->tail)
+ n = cq->ibcq.cqe + 1 + cq->head - cq->tail;
+ else
+ n = cq->head - cq->tail;
+ if (unlikely((u32)cqe < n)) {
+ spin_unlock_irq(&cq->lock);
+ vfree(wc);
+ ret = -EOVERFLOW;
+ goto bail;
+ }
+ for (n = 0; cq->tail != cq->head; n++) {
+ wc[n] = cq->queue[cq->tail];
+ if (cq->tail == cq->ibcq.cqe)
+ cq->tail = 0;
+ else
+ cq->tail++;
+ }
+ cq->ibcq.cqe = cqe;
+ cq->head = n;
+ cq->tail = 0;
+ old_wc = cq->queue;
+ cq->queue = wc;
+ spin_unlock_irq(&cq->lock);
+
+ vfree(old_wc);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
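To make the occupancy check above concrete: with the old cq->ibcq.cqe = 7 (eight slots), head = 2 and tail = 6, the queue holds 7 + 1 + 2 - 6 = 4 entries (queue[6], queue[7], queue[0], queue[1]), so any resize request with cqe < 4 is rejected with -EOVERFLOW before anything is copied.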
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
new file mode 100644
index 0000000000000..593e28969c692
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _IPATH_DEBUG_H
+#define _IPATH_DEBUG_H
+
+#ifndef _IPATH_DEBUGGING /* debugging enabled or not */
+#define _IPATH_DEBUGGING 1
+#endif
+
+#if _IPATH_DEBUGGING
+
+/*
+ * Mask values for debugging. The scheme allows us to compile out any
+ * of the debug tracing stuff, and if compiled in, to enable or disable
+ * dynamically. This can be set at modprobe time also:
+ * modprobe infinipath.ko infinipath_debug=7
+ */
+
+#define __IPATH_INFO 0x1 /* generic low verbosity stuff */
+#define __IPATH_DBG 0x2 /* generic debug */
+#define __IPATH_TRSAMPLE 0x8 /* generate trace buffer sample entries */
+/* leave some low verbosity spots open */
+#define __IPATH_VERBDBG 0x40 /* very verbose debug */
+#define __IPATH_PKTDBG 0x80 /* print packet data */
+/* print process startup (init)/exit messages */
+#define __IPATH_PROCDBG 0x100
+/* print mmap/nopage stuff, not using VDBG any more */
+#define __IPATH_MMDBG 0x200
+#define __IPATH_USER_SEND 0x1000 /* use user mode send */
+#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
+#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */
+#define __IPATH_SMADBG 0x8000 /* sma packet debug */
+#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) general debug on */
+#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings on */
+#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors on */
+#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump on */
+#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump on */
+
+#else /* _IPATH_DEBUGGING */
+
+/*
+ * define all of these even with debugging off, for the few places that do
+ * if (infinipath_debug & __IPATH_xyzzy), but in a way that will make the
+ * compiler eliminate the code
+ */
+
+#define __IPATH_INFO 0x0 /* generic low verbosity stuff */
+#define __IPATH_DBG 0x0 /* generic debug */
+#define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */
+#define __IPATH_VERBDBG 0x0 /* very verbose debug */
+#define __IPATH_PKTDBG 0x0 /* print packet data */
+#define __IPATH_PROCDBG 0x0 /* print process startup (init)/exit messages */
+/* print mmap/nopage stuff, not using VDBG any more */
+#define __IPATH_MMDBG 0x0
+#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
+#define __IPATH_SMADBG 0x0 /* sma packet debug */
+#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) general debug on */
+#define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */
+#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
+#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */
+#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) table dump on */
+
+#endif /* _IPATH_DEBUGGING */
+
+#define __IPATH_VERBOSEDBG __IPATH_VERBDBG
+
+#endif /* _IPATH_DEBUG_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
new file mode 100644
index 0000000000000..cd533cf951c26
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file contains support for diagnostic functions. It is accessed by
+ * opening the ipath_diag device, normally minor number 129. Diagnostic use
+ * of the InfiniPath chip may render the chip or board unusable until the
+ * driver is unloaded, or in some cases, until the system is rebooted.
+ *
+ * Accesses to the chip through this interface are not similar to going
+ * through the /sys/bus/pci resource mmap interface.
+ */
+
+#include <linux/pci.h>
+#include <asm/uaccess.h>
+
+#include "ipath_common.h"
+#include "ipath_kernel.h"
+#include "ips_common.h"
+#include "ipath_layer.h"
+
+int ipath_diag_inuse;
+static int diag_set_link;
+
+static int ipath_diag_open(struct inode *in, struct file *fp);
+static int ipath_diag_release(struct inode *in, struct file *fp);
+static ssize_t ipath_diag_read(struct file *fp, char __user *data,
+ size_t count, loff_t *off);
+static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off);
+
+static struct file_operations diag_file_ops = {
+ .owner = THIS_MODULE,
+ .write = ipath_diag_write,
+ .read = ipath_diag_read,
+ .open = ipath_diag_open,
+ .release = ipath_diag_release
+};
+
+static struct cdev *diag_cdev;
+static struct class_device *diag_class_dev;
+
+int ipath_diag_init(void)
+{
+ return ipath_cdev_init(IPATH_DIAG_MINOR, "ipath_diag",
+ &diag_file_ops, &diag_cdev, &diag_class_dev);
+}
+
+void ipath_diag_cleanup(void)
+{
+ ipath_cdev_cleanup(&diag_cdev, &diag_class_dev);
+}
+
+/**
+ * ipath_read_umem64 - read a 64-bit quantity from the chip into user space
+ * @dd: the infinipath device
+ * @uaddr: the location to store the data in user memory
+ * @caddr: the source chip address (full pointer, not offset)
+ * @count: number of bytes to copy (multiple of 32 bits)
+ *
+ * This function also localizes all chip memory accesses.
+ * The copy should be written such that we read full cacheline packets
+ * from the chip. This is usually used for a single qword
+ *
+ * NOTE: This assumes the chip address is 64-bit aligned.
+ */
+static int ipath_read_umem64(struct ipath_devdata *dd, void __user *uaddr,
+ const void __iomem *caddr, size_t count)
+{
+ const u64 __iomem *reg_addr = caddr;
+ const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
+ int ret;
+
+ /* not very efficient, but it works for now */
+ if (reg_addr < dd->ipath_kregbase ||
+ reg_end > dd->ipath_kregend) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ while (reg_addr < reg_end) {
+ u64 data = readq(reg_addr);
+ if (copy_to_user(uaddr, &data, sizeof(u64))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ reg_addr++;
+ uaddr++;
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+/**
+ * ipath_write_umem64 - write a 64-bit quantity to the chip from user space
+ * @dd: the infinipath device
+ * @caddr: the destination chip address (full pointer, not offset)
+ * @uaddr: the source of the data in user memory
+ * @count: the number of bytes to copy (multiple of 32 bits)
+ *
+ * This is usually used for a single qword
+ * NOTE: This assumes the chip address is 64-bit aligned.
+ */
+
+static int ipath_write_umem64(struct ipath_devdata *dd, void __iomem *caddr,
+ const void __user *uaddr, size_t count)
+{
+ u64 __iomem *reg_addr = caddr;
+ const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
+ int ret;
+
+ /* not very efficient, but it works for now */
+ if (reg_addr < dd->ipath_kregbase ||
+ reg_end > dd->ipath_kregend) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ while (reg_addr < reg_end) {
+ u64 data;
+ if (copy_from_user(&data, uaddr, sizeof(data))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ writeq(data, reg_addr);
+
+ reg_addr++;
+ uaddr++;
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+/**
+ * ipath_read_umem32 - read a 32-bit quantity from the chip into user space
+ * @dd: the infinipath device
+ * @uaddr: the location to store the data in user memory
+ * @caddr: the source chip address (full pointer, not offset)
+ * @count: number of bytes to copy
+ *
+ * read 32 bit values, not 64 bit; for memories that only
+ * support 32 bit reads; usually a single dword.
+ */
+static int ipath_read_umem32(struct ipath_devdata *dd, void __user *uaddr,
+ const void __iomem *caddr, size_t count)
+{
+ const u32 __iomem *reg_addr = caddr;
+ const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
+ int ret;
+
+ if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
+ reg_end > (u32 __iomem *) dd->ipath_kregend) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ /* not very efficient, but it works for now */
+ while (reg_addr < reg_end) {
+ u32 data = readl(reg_addr);
+ if (copy_to_user(uaddr, &data, sizeof(data))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ reg_addr++;
+ uaddr++;
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+/**
+ * ipath_write_umem32 - write a 32-bit quantity to the chip from user space
+ * @dd: the infinipath device
+ * @caddr: the destination chip address (full pointer, not offset)
+ * @uaddr: the source of the data in user memory
+ * @count: number of bytes to copy
+ *
+ * write 32 bit values, not 64 bit; for memories that only
+ * support 32 bit write; usually a single dword.
+ */
+
+static int ipath_write_umem32(struct ipath_devdata *dd, void __iomem *caddr,
+ const void __user *uaddr, size_t count)
+{
+ u32 __iomem *reg_addr = caddr;
+ const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
+ int ret;
+
+ if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
+ reg_end > (u32 __iomem *) dd->ipath_kregend) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ while (reg_addr < reg_end) {
+ u32 data;
+ if (copy_from_user(&data, uaddr, sizeof(data))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ writel(data, reg_addr);
+
+ reg_addr++;
+ uaddr++;
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+static int ipath_diag_open(struct inode *in, struct file *fp)
+{
+ struct ipath_devdata *dd;
+ int unit = 0; /* XXX this is bogus */
+ unsigned long flags;
+ int ret;
+
+ dd = ipath_lookup(unit);
+
+ mutex_lock(&ipath_mutex);
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+
+ if (ipath_diag_inuse) {
+ ret = -EBUSY;
+ goto bail;
+ }
+
+ list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
+ /*
+ * we need at least one infinipath device to be present
+ * (don't use INITTED, because we want to be able to open
+ * even if device is in freeze mode, which cleared INITTED).
+ * There is a small amount of risk to this, which is why we
+ * also verify kregbase is set.
+ */
+
+ if (!(dd->ipath_flags & IPATH_PRESENT) ||
+ !dd->ipath_kregbase)
+ continue;
+
+ ipath_diag_inuse = 1;
+ diag_set_link = 0;
+ ret = 0;
+ goto bail;
+ }
+
+ ret = -ENODEV;
+
+bail:
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+ mutex_unlock(&ipath_mutex);
+
+ /* Only expose a way to reset the device if we
+ make it into diag mode. */
+ if (ret == 0)
+ ipath_expose_reset(&dd->pcidev->dev);
+
+ return ret;
+}
+
+static int ipath_diag_release(struct inode *i, struct file *f)
+{
+ mutex_lock(&ipath_mutex);
+ ipath_diag_inuse = 0;
+ mutex_unlock(&ipath_mutex);
+ return 0;
+}
+
+static ssize_t ipath_diag_read(struct file *fp, char __user *data,
+ size_t count, loff_t *off)
+{
+ int unit = 0; /* XXX provide for reads on other units some day */
+ struct ipath_devdata *dd;
+ void __iomem *kreg_base;
+ ssize_t ret;
+
+ dd = ipath_lookup(unit);
+ if (!dd) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ kreg_base = dd->ipath_kregbase;
+
+ if (count == 0)
+ ret = 0;
+ else if ((count % 4) || (*off % 4))
+ /* address or length is not 32-bit aligned, hence invalid */
+ ret = -EINVAL;
+ else if ((count % 8) || (*off % 8))
+ /* address or length not 64-bit aligned; do 32-bit reads */
+ ret = ipath_read_umem32(dd, data, kreg_base + *off, count);
+ else
+ ret = ipath_read_umem64(dd, data, kreg_base + *off, count);
+
+ if (ret >= 0) {
+ *off += count;
+ ret = count;
+ }
+
+bail:
+ return ret;
+}
+
+static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off)
+{
+ int unit = 0; /* XXX this is bogus */
+ struct ipath_devdata *dd;
+ void __iomem *kreg_base;
+ ssize_t ret;
+
+ dd = ipath_lookup(unit);
+ if (!dd) {
+ ret = -ENODEV;
+ goto bail;
+ }
+ kreg_base = dd->ipath_kregbase;
+
+ if (count == 0)
+ ret = 0;
+ else if ((count % 4) || (*off % 4))
+ /* address or length is not 32-bit aligned, hence invalid */
+ ret = -EINVAL;
+ else if ((count % 8) || (*off % 8))
+ /* address or length not 64-bit aligned; do 32-bit writes */
+ ret = ipath_write_umem32(dd, kreg_base + *off, data, count);
+ else
+ ret = ipath_write_umem64(dd, kreg_base + *off, data, count);
+
+ if (ret >= 0) {
+ *off += count;
+ ret = count;
+ }
+
+bail:
+ return ret;
+}
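From user space the alignment checks above translate directly into pread()/pwrite() constraints: offsets and lengths must be multiples of 4, and 8-byte-aligned, 8-byte-multiple transfers go through the 64-bit path. A sketch of reading one 64-bit register image, where the /dev/ipath_diag node name is an assumption (this file only establishes the "ipath_diag" name and minor number):

/*
 * Sketch: read a 64-bit register image through the diag interface.
 * The device node path is an assumption; the alignment rules come
 * from ipath_diag_read() above.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

static int ipath_diag_read_qword(off_t chip_off, uint64_t *val)
{
	int fd = open("/dev/ipath_diag", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = pread(fd, val, sizeof(*val), chip_off);	/* 8-byte aligned */
	close(fd);
	return n == (ssize_t)sizeof(*val) ? 0 : -1;
}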
+
+void ipath_diag_bringup_link(struct ipath_devdata *dd)
+{
+ if (diag_set_link || (dd->ipath_flags & IPATH_LINKACTIVE))
+ return;
+
+ diag_set_link = 1;
+ ipath_cdbg(VERBOSE, "Trying to set to set link active for "
+ "diag pkt\n");
+ ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM);
+ ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE);
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
new file mode 100644
index 0000000000000..58a94efb0070b
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -0,0 +1,1983 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include "ipath_kernel.h"
+#include "ips_common.h"
+#include "ipath_layer.h"
+
+static void ipath_update_pio_bufs(struct ipath_devdata *);
+
+const char *ipath_get_unit_name(int unit)
+{
+ static char iname[16];
+ snprintf(iname, sizeof iname, "infinipath%u", unit);
+ return iname;
+}
+
+EXPORT_SYMBOL_GPL(ipath_get_unit_name);
+
+#define DRIVER_LOAD_MSG "PathScale " IPATH_DRV_NAME " loaded: "
+#define PFX IPATH_DRV_NAME ": "
+
+/*
+ * The size has to be longer than this string, so we can append
+ * board/chip information to it in the init code.
+ */
+const char ipath_core_version[] = IPATH_IDSTR "\n";
+
+static struct idr unit_table;
+DEFINE_SPINLOCK(ipath_devs_lock);
+LIST_HEAD(ipath_dev_list);
+
+wait_queue_head_t ipath_sma_state_wait;
+
+unsigned ipath_debug = __IPATH_INFO;
+
+module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(debug, "mask for debug prints");
+EXPORT_SYMBOL_GPL(ipath_debug);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("PathScale <support@pathscale.com>");
+MODULE_DESCRIPTION("Pathscale InfiniPath driver");
+
+const char *ipath_ibcstatus_str[] = {
+ "Disabled",
+ "LinkUp",
+ "PollActive",
+ "PollQuiet",
+ "SleepDelay",
+ "SleepQuiet",
+ "LState6", /* unused */
+ "LState7", /* unused */
+ "CfgDebounce",
+ "CfgRcvfCfg",
+ "CfgWaitRmt",
+ "CfgIdle",
+ "RecovRetrain",
+ "LState0xD", /* unused */
+ "RecovWaitRmt",
+ "RecovIdle",
+};
+
+/*
+ * These variables are initialized in the chip-specific files
+ * but are defined here.
+ */
+u16 ipath_gpio_sda_num, ipath_gpio_scl_num;
+u64 ipath_gpio_sda, ipath_gpio_scl;
+u64 infinipath_i_bitsextant;
+ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant;
+u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask;
+
+static void __devexit ipath_remove_one(struct pci_dev *);
+static int __devinit ipath_init_one(struct pci_dev *,
+ const struct pci_device_id *);
+
+/* Only needed for registration, nothing else needs this info */
+#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
+#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
+#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
+
+static const struct pci_device_id ipath_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE,
+ PCI_DEVICE_ID_INFINIPATH_HT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE,
+ PCI_DEVICE_ID_INFINIPATH_PE800)},
+};
+
+MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
+
+static struct pci_driver ipath_driver = {
+ .name = IPATH_DRV_NAME,
+ .probe = ipath_init_one,
+ .remove = __devexit_p(ipath_remove_one),
+ .id_table = ipath_pci_tbl,
+};
+
+/*
+ * This is where port 0's rcvhdrtail register is written back; we also
+ * want nothing else sharing the cache line, so make it a cache line
+ * in size. Used for all units.
+ */
+volatile __le64 *ipath_port0_rcvhdrtail;
+dma_addr_t ipath_port0_rcvhdrtail_dma;
+static int port0_rcvhdrtail_refs;
+
+static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
+ u32 *bar0, u32 *bar1)
+{
+ int ret;
+
+ ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
+ if (ret)
+ ipath_dev_err(dd, "failed to read bar0 before enable: "
+ "error %d\n", -ret);
+
+ ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
+ if (ret)
+ ipath_dev_err(dd, "failed to read bar1 before enable: "
+ "error %d\n", -ret);
+
+ ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
+}
+
+static void ipath_free_devdata(struct pci_dev *pdev,
+ struct ipath_devdata *dd)
+{
+ unsigned long flags;
+
+ pci_set_drvdata(pdev, NULL);
+
+ if (dd->ipath_unit != -1) {
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+ idr_remove(&unit_table, dd->ipath_unit);
+ list_del(&dd->ipath_list);
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+ }
+ dma_free_coherent(&pdev->dev, sizeof(*dd), dd, dd->ipath_dma_addr);
+}
+
+static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
+{
+ unsigned long flags;
+ struct ipath_devdata *dd;
+ dma_addr_t dma_addr;
+ int ret;
+
+ if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
+ dd = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dd = dma_alloc_coherent(&pdev->dev, sizeof(*dd), &dma_addr,
+ GFP_KERNEL);
+
+ if (!dd) {
+ dd = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dd->ipath_dma_addr = dma_addr;
+ dd->ipath_unit = -1;
+
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+
+ ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
+ if (ret < 0) {
+ printk(KERN_ERR IPATH_DRV_NAME
+ ": Could not allocate unit ID: error %d\n", -ret);
+ ipath_free_devdata(pdev, dd);
+ dd = ERR_PTR(ret);
+ goto bail_unlock;
+ }
+
+ dd->pcidev = pdev;
+ pci_set_drvdata(pdev, dd);
+
+ list_add(&dd->ipath_list, &ipath_dev_list);
+
+bail_unlock:
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+
+bail:
+ return dd;
+}
+
+static inline struct ipath_devdata *__ipath_lookup(int unit)
+{
+ return idr_find(&unit_table, unit);
+}
+
+struct ipath_devdata *ipath_lookup(int unit)
+{
+ struct ipath_devdata *dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+ dd = __ipath_lookup(unit);
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+
+ return dd;
+}
+
+int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp)
+{
+ int nunits, npresent, nup;
+ struct ipath_devdata *dd;
+ unsigned long flags;
+ u32 maxports;
+
+ nunits = npresent = nup = maxports = 0;
+
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+
+ list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
+ nunits++;
+ if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
+ npresent++;
+ if (dd->ipath_lid &&
+ !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
+ | IPATH_LINKUNK)))
+ nup++;
+ if (dd->ipath_cfgports > maxports)
+ maxports = dd->ipath_cfgports;
+ }
+
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+
+ if (npresentp)
+ *npresentp = npresent;
+ if (nupp)
+ *nupp = nup;
+ if (maxportsp)
+ *maxportsp = maxports;
+
+ return nunits;
+}
+
+static int init_port0_rcvhdrtail(struct pci_dev *pdev)
+{
+ int ret;
+
+ mutex_lock(&ipath_mutex);
+
+ if (!ipath_port0_rcvhdrtail) {
+ ipath_port0_rcvhdrtail =
+ dma_alloc_coherent(&pdev->dev,
+ IPATH_PORT0_RCVHDRTAIL_SIZE,
+ &ipath_port0_rcvhdrtail_dma,
+ GFP_KERNEL);
+
+ if (!ipath_port0_rcvhdrtail) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ }
+ port0_rcvhdrtail_refs++;
+ ret = 0;
+
+bail:
+ mutex_unlock(&ipath_mutex);
+
+ return ret;
+}
+
+static void cleanup_port0_rcvhdrtail(struct pci_dev *pdev)
+{
+ mutex_lock(&ipath_mutex);
+
+ if (!--port0_rcvhdrtail_refs) {
+ dma_free_coherent(&pdev->dev, IPATH_PORT0_RCVHDRTAIL_SIZE,
+ (void *) ipath_port0_rcvhdrtail,
+ ipath_port0_rcvhdrtail_dma);
+ ipath_port0_rcvhdrtail = NULL;
+ }
+
+ mutex_unlock(&ipath_mutex);
+}
+
+/*
+ * These next two routines are placeholders in case we don't have per-arch
+ * code for controlling write combining. If explicit control of write
+ * combining is not available, performance will probably be awful.
+ */
+
+int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
+{
+ return -EOPNOTSUPP;
+}
+
+void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
+{
+}
+
+static int __devinit ipath_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret, len, j;
+ struct ipath_devdata *dd;
+ unsigned long long addr;
+ u32 bar0 = 0, bar1 = 0;
+ u8 rev;
+
+ ret = init_port0_rcvhdrtail(pdev);
+ if (ret < 0) {
+ printk(KERN_ERR IPATH_DRV_NAME
+ ": Could not allocate port0_rcvhdrtail: error %d\n",
+ -ret);
+ goto bail;
+ }
+
+ dd = ipath_alloc_devdata(pdev);
+ if (IS_ERR(dd)) {
+ ret = PTR_ERR(dd);
+ printk(KERN_ERR IPATH_DRV_NAME
+ ": Could not allocate devdata: error %d\n", -ret);
+ goto bail_rcvhdrtail;
+ }
+
+ ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
+
+ read_bars(dd, pdev, &bar0, &bar1);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ /* This can happen if:
+ *
+ * We did a chip reset, and then failed to reprogram the
+ * BAR, or the chip reset due to an internal error. We then
+ * unloaded the driver and reloaded it.
+ *
+ * Both reset cases set the BAR back to initial state. For
+ * the latter case, the AER sticky error bit at offset 0x718
+ * should be set, but the Linux kernel doesn't yet know
+ * about that, it appears. If the original BAR was retained
+ * in the kernel data structures, this may be OK.
+ */
+ ipath_dev_err(dd, "enable unit %d failed: error %d\n",
+ dd->ipath_unit, -ret);
+ goto bail_devdata;
+ }
+ addr = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+ ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %x, vend %x/%x "
+ "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
+ ent->device, ent->driver_data);
+
+ read_bars(dd, pdev, &bar0, &bar1);
+
+ if (!bar1 && !(bar0 & ~0xf)) {
+ if (addr) {
+ dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
+ "rewriting as %llx\n", addr);
+ ret = pci_write_config_dword(
+ pdev, PCI_BASE_ADDRESS_0, addr);
+ if (ret) {
+ ipath_dev_err(dd, "rewrite of BAR0 "
+ "failed: err %d\n", -ret);
+ goto bail_disable;
+ }
+ ret = pci_write_config_dword(
+ pdev, PCI_BASE_ADDRESS_1, addr >> 32);
+ if (ret) {
+ ipath_dev_err(dd, "rewrite of BAR1 "
+ "failed: err %d\n", -ret);
+ goto bail_disable;
+ }
+ } else {
+ ipath_dev_err(dd, "BAR is 0 (probable RESET), "
+ "not usable until reboot\n");
+ ret = -ENODEV;
+ goto bail_disable;
+ }
+ }
+
+ ret = pci_request_regions(pdev, IPATH_DRV_NAME);
+ if (ret) {
+ dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
+ "err %d\n", dd->ipath_unit, -ret);
+ goto bail_disable;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+ if (ret) {
+ dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
+ "fails: %d\n", dd->ipath_unit, ret);
+ goto bail_regions;
+ }
+
+ pci_set_master(pdev);
+
+ /*
+ * Save BARs to rewrite after device reset. Save all 64 bits of
+ * BAR, just in case.
+ */
+ dd->ipath_pcibar0 = addr;
+ dd->ipath_pcibar1 = addr >> 32;
+ dd->ipath_deviceid = ent->device; /* save for later use */
+ dd->ipath_vendorid = ent->vendor;
+
+ /* setup the chip-specific functions, as early as possible. */
+ switch (ent->device) {
+ case PCI_DEVICE_ID_INFINIPATH_HT:
+ ipath_init_ht400_funcs(dd);
+ break;
+ case PCI_DEVICE_ID_INFINIPATH_PE800:
+ ipath_init_pe800_funcs(dd);
+ break;
+ default:
+ ipath_dev_err(dd, "Found unknown PathScale deviceid 0x%x, "
+ "failing\n", ent->device);
+ return -ENODEV;
+ }
+
+ for (j = 0; j < 6; j++) {
+ if (!pdev->resource[j].start)
+ continue;
+ ipath_cdbg(VERBOSE, "BAR %d start %lx, end %lx, len %lx\n",
+ j, pdev->resource[j].start,
+ pdev->resource[j].end,
+ pci_resource_len(pdev, j));
+ }
+
+ if (!addr) {
+ ipath_dev_err(dd, "No valid address in BAR 0!\n");
+ ret = -ENODEV;
+ goto bail_regions;
+ }
+
+ dd->ipath_deviceid = ent->device; /* save for later use */
+ dd->ipath_vendorid = ent->vendor;
+
+ ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+ if (ret) {
+ ipath_dev_err(dd, "Failed to read PCI revision ID unit "
+ "%u: err %d\n", dd->ipath_unit, -ret);
+ goto bail_regions; /* shouldn't ever happen */
+ }
+ dd->ipath_pcirev = rev;
+
+ dd->ipath_kregbase = ioremap_nocache(addr, len);
+
+ if (!dd->ipath_kregbase) {
+ ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
+ addr);
+ ret = -ENOMEM;
+ goto bail_iounmap;
+ }
+ dd->ipath_kregend = (u64 __iomem *)
+ ((void __iomem *)dd->ipath_kregbase + len);
+ dd->ipath_physaddr = addr; /* used for io_remap, etc. */
+ /* for user mmap */
+ dd->ipath_kregvirt = (u64 __iomem *) phys_to_virt(addr);
+ ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p "
+ "kregvirt %p\n", addr, dd->ipath_kregbase,
+ dd->ipath_kregvirt);
+
+ /*
+ * clear ipath_flags here instead of in ipath_init_chip as it is set
+ * by ipath_setup_htconfig.
+ */
+ dd->ipath_flags = 0;
+
+ if (dd->ipath_f_bus(dd, pdev))
+ ipath_dev_err(dd, "Failed to setup config space; "
+ "continuing anyway\n");
+
+ /*
+ * set up our interrupt handler; SA_SHIRQ probably not needed,
+ * since MSI interrupts shouldn't be shared but won't hurt for now.
+ * Check for a zero irq after we return from chip-specific bus setup,
+ * since that setup can affect it.
+ */
+ if (!pdev->irq)
+ ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
+ "work\n");
+ else {
+ ret = request_irq(pdev->irq, ipath_intr, SA_SHIRQ,
+ IPATH_DRV_NAME, dd);
+ if (ret) {
+ ipath_dev_err(dd, "Couldn't setup irq handler, "
+ "irq=%u: %d\n", pdev->irq, ret);
+ goto bail_iounmap;
+ }
+ }
+
+ ret = ipath_init_chip(dd, 0); /* do the chip-specific init */
+ if (ret)
+ goto bail_iounmap;
+
+ ret = ipath_enable_wc(dd);
+
+ if (ret) {
+ ipath_dev_err(dd, "Write combining not enabled "
+ "(err %d): performance may be poor\n",
+ -ret);
+ ret = 0;
+ }
+
+ ipath_device_create_group(&pdev->dev, dd);
+ ipathfs_add_device(dd);
+ ipath_user_add(dd);
+ ipath_layer_add(dd);
+
+ goto bail;
+
+bail_iounmap:
+ iounmap((volatile void __iomem *) dd->ipath_kregbase);
+
+bail_regions:
+ pci_release_regions(pdev);
+
+bail_disable:
+ pci_disable_device(pdev);
+
+bail_devdata:
+ ipath_free_devdata(pdev, dd);
+
+bail_rcvhdrtail:
+ cleanup_port0_rcvhdrtail(pdev);
+
+bail:
+ return ret;
+}
+
+static void __devexit ipath_remove_one(struct pci_dev *pdev)
+{
+ struct ipath_devdata *dd;
+
+ ipath_cdbg(VERBOSE, "removing, pdev=%p\n", pdev);
+ if (!pdev)
+ return;
+
+ dd = pci_get_drvdata(pdev);
+ ipath_layer_del(dd);
+ ipath_user_del(dd);
+ ipathfs_remove_device(dd);
+ ipath_device_remove_group(&pdev->dev, dd);
+ ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
+ "unit %u\n", dd, (u32) dd->ipath_unit);
+ if (dd->ipath_kregbase) {
+ ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n",
+ dd->ipath_kregbase);
+ iounmap((volatile void __iomem *) dd->ipath_kregbase);
+ dd->ipath_kregbase = NULL;
+ }
+ pci_release_regions(pdev);
+ ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
+ pci_disable_device(pdev);
+
+ ipath_free_devdata(pdev, dd);
+ cleanup_port0_rcvhdrtail(pdev);
+}
+
+/* general driver use */
+DEFINE_MUTEX(ipath_mutex);
+
+static DEFINE_SPINLOCK(ipath_pioavail_lock);
+
+/**
+ * ipath_disarm_piobufs - cancel a range of PIO buffers
+ * @dd: the infinipath device
+ * @first: the first PIO buffer to cancel
+ * @cnt: the number of PIO buffers to cancel
+ *
+ * cancel a range of PIO buffers, used when they might be armed, but
+ * not triggered. Used at init to ensure buffer state, and also user
+ * process close, in case it died while writing to a PIO buffer.
+ * Also used after errors.
+ */
+void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
+ unsigned cnt)
+{
+ unsigned i, last = first + cnt;
+ u64 sendctrl, sendorig;
+
+ ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
+ sendorig = dd->ipath_sendctrl | INFINIPATH_S_DISARM;
+ for (i = first; i < last; i++) {
+ sendctrl = sendorig |
+ (i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ sendctrl);
+ }
+
+ /*
+ * Write it again with current value, in case ipath_sendctrl changed
+ * while we were looping; no critical bits that would require
+ * locking.
+ *
+ * Write a 0, and then the original value, reading scratch in
+ * between. This seems to avoid a chip timing race that causes
+ * pioavail updates to memory to stop.
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ 0);
+ sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ dd->ipath_sendctrl);
+}
+
+/**
+ * ipath_wait_linkstate - wait for an IB link state change to occur
+ * @dd: the infinipath device
+ * @state: the state to wait for
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for an IB link state change to occur.
+ * For now, take the easy polling route. Currently used only by
+ * ipath_layer_set_linkstate. Returns 0 if the state is reached,
+ * otherwise -ETIMEDOUT. @state can have multiple states set, for any
+ * of several transitions.
+ */
+int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
+{
+ dd->ipath_sma_state_wanted = state;
+ wait_event_interruptible_timeout(ipath_sma_state_wait,
+ (dd->ipath_flags & state),
+ msecs_to_jiffies(msecs));
+ dd->ipath_sma_state_wanted = 0;
+
+ if (!(dd->ipath_flags & state)) {
+ u64 val;
+ ipath_cdbg(SMA, "Didn't reach linkstate %s within %u ms\n",
+ /* test INIT ahead of DOWN, both can be set */
+ (state & IPATH_LINKINIT) ? "INIT" :
+ ((state & IPATH_LINKDOWN) ? "DOWN" :
+ ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
+ msecs);
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
+ ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
+ (unsigned long long) ipath_read_kreg64(
+ dd, dd->ipath_kregs->kr_ibcctrl),
+ (unsigned long long) val,
+ ipath_ibcstatus_str[val & 0xf]);
+ }
+ return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
+}
+
+void ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
+{
+ *buf = '\0';
+ if (err & INFINIPATH_E_RHDRLEN)
+ strlcat(buf, "rhdrlen ", blen);
+ if (err & INFINIPATH_E_RBADTID)
+ strlcat(buf, "rbadtid ", blen);
+ if (err & INFINIPATH_E_RBADVERSION)
+ strlcat(buf, "rbadversion ", blen);
+ if (err & INFINIPATH_E_RHDR)
+ strlcat(buf, "rhdr ", blen);
+ if (err & INFINIPATH_E_RLONGPKTLEN)
+ strlcat(buf, "rlongpktlen ", blen);
+ if (err & INFINIPATH_E_RSHORTPKTLEN)
+ strlcat(buf, "rshortpktlen ", blen);
+ if (err & INFINIPATH_E_RMAXPKTLEN)
+ strlcat(buf, "rmaxpktlen ", blen);
+ if (err & INFINIPATH_E_RMINPKTLEN)
+ strlcat(buf, "rminpktlen ", blen);
+ if (err & INFINIPATH_E_RFORMATERR)
+ strlcat(buf, "rformaterr ", blen);
+ if (err & INFINIPATH_E_RUNSUPVL)
+ strlcat(buf, "runsupvl ", blen);
+ if (err & INFINIPATH_E_RUNEXPCHAR)
+ strlcat(buf, "runexpchar ", blen);
+ if (err & INFINIPATH_E_RIBFLOW)
+ strlcat(buf, "ribflow ", blen);
+ if (err & INFINIPATH_E_REBP)
+ strlcat(buf, "EBP ", blen);
+ if (err & INFINIPATH_E_SUNDERRUN)
+ strlcat(buf, "sunderrun ", blen);
+ if (err & INFINIPATH_E_SPIOARMLAUNCH)
+ strlcat(buf, "spioarmlaunch ", blen);
+ if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
+ strlcat(buf, "sunexperrpktnum ", blen);
+ if (err & INFINIPATH_E_SDROPPEDDATAPKT)
+ strlcat(buf, "sdroppeddatapkt ", blen);
+ if (err & INFINIPATH_E_SDROPPEDSMPPKT)
+ strlcat(buf, "sdroppedsmppkt ", blen);
+ if (err & INFINIPATH_E_SMAXPKTLEN)
+ strlcat(buf, "smaxpktlen ", blen);
+ if (err & INFINIPATH_E_SMINPKTLEN)
+ strlcat(buf, "sminpktlen ", blen);
+ if (err & INFINIPATH_E_SUNSUPVL)
+ strlcat(buf, "sunsupVL ", blen);
+ if (err & INFINIPATH_E_SPKTLEN)
+ strlcat(buf, "spktlen ", blen);
+ if (err & INFINIPATH_E_INVALIDADDR)
+ strlcat(buf, "invalidaddr ", blen);
+ if (err & INFINIPATH_E_RICRC)
+ strlcat(buf, "CRC ", blen);
+ if (err & INFINIPATH_E_RVCRC)
+ strlcat(buf, "VCRC ", blen);
+ if (err & INFINIPATH_E_RRCVEGRFULL)
+ strlcat(buf, "rcvegrfull ", blen);
+ if (err & INFINIPATH_E_RRCVHDRFULL)
+ strlcat(buf, "rcvhdrfull ", blen);
+ if (err & INFINIPATH_E_IBSTATUSCHANGED)
+ strlcat(buf, "ibcstatuschg ", blen);
+ if (err & INFINIPATH_E_RIBLOSTLINK)
+ strlcat(buf, "riblostlink ", blen);
+ if (err & INFINIPATH_E_HARDWARE)
+ strlcat(buf, "hardware ", blen);
+ if (err & INFINIPATH_E_RESET)
+ strlcat(buf, "reset ", blen);
+}
+
+/**
+ * get_rhf_errstring - decode RHF errors
+ * @err: the err number
+ * @msg: the output buffer
+ * @len: the length of the output buffer
+ *
+ * only used in one place now; may be used in more places later
+ */
+static void get_rhf_errstring(u32 err, char *msg, size_t len)
+{
+ /* start with an empty string, in case no error bits are set */
+ *msg = '\0';
+
+ if (err & INFINIPATH_RHF_H_ICRCERR)
+ strlcat(msg, "icrcerr ", len);
+ if (err & INFINIPATH_RHF_H_VCRCERR)
+ strlcat(msg, "vcrcerr ", len);
+ if (err & INFINIPATH_RHF_H_PARITYERR)
+ strlcat(msg, "parityerr ", len);
+ if (err & INFINIPATH_RHF_H_LENERR)
+ strlcat(msg, "lenerr ", len);
+ if (err & INFINIPATH_RHF_H_MTUERR)
+ strlcat(msg, "mtuerr ", len);
+ if (err & INFINIPATH_RHF_H_IHDRERR)
+ /* infinipath hdr checksum error */
+ strlcat(msg, "ipathhdrerr ", len);
+ if (err & INFINIPATH_RHF_H_TIDERR)
+ strlcat(msg, "tiderr ", len);
+ if (err & INFINIPATH_RHF_H_MKERR)
+ /* bad port, offset, etc. */
+ strlcat(msg, "invalid ipathhdr ", len);
+ if (err & INFINIPATH_RHF_H_IBERR)
+ strlcat(msg, "iberr ", len);
+ if (err & INFINIPATH_RHF_L_SWA)
+ strlcat(msg, "swA ", len);
+ if (err & INFINIPATH_RHF_L_SWB)
+ strlcat(msg, "swB ", len);
+}
+
+/**
+ * ipath_get_egrbuf - get an eager buffer
+ * @dd: the infinipath device
+ * @bufnum: the eager buffer to get
+ * @err: unused
+ *
+ * must only be called if ipath_pd[port] is known to be allocated
+ */
+static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
+ int err)
+{
+ return dd->ipath_port0_skbs ?
+ (void *)dd->ipath_port0_skbs[bufnum]->data : NULL;
+}
+
+/**
+ * ipath_alloc_skb - allocate an skb and buffer with possible constraints
+ * @dd: the infinipath device
+ * @gfp_mask: the sk_buff GFP mask
+ */
+struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
+ gfp_t gfp_mask)
+{
+ struct sk_buff *skb;
+ u32 len;
+
+ /*
+ * The only fully supported way to handle this is to allocate lots
+ * of extra space, align as needed, and then do skb_reserve(). That
+ * wastes a lot of memory... I'll have to hack this into
+ * infinipath_copy also.
+ */
+
+ /*
+ * We need 4 extra bytes for unaligned transfer copying
+ */
+ if (dd->ipath_flags & IPATH_4BYTE_TID) {
+ /* we need a 4KB multiple alignment, and there is no way
+ * to do it except to allocate extra and then skb_reserve
+ * enough to bring it up to the right alignment.
+ */
+ len = dd->ipath_ibmaxlen + 4 + (1 << 11) - 1;
+ }
+ else
+ len = dd->ipath_ibmaxlen + 4;
+ skb = __dev_alloc_skb(len, gfp_mask);
+ if (!skb) {
+ ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
+ len);
+ goto bail;
+ }
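+ /*
+ * Reserve 4 bytes for unaligned-transfer copying; with
+ * IPATH_4BYTE_TID, also push (skb->data + 4) up to the next
+ * (1 << 11) byte boundary, using the slack allocated above.
+ */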
+ if (dd->ipath_flags & IPATH_4BYTE_TID) {
+ u32 una = ((1 << 11) - 1) & (unsigned long)(skb->data + 4);
+ if (una)
+ skb_reserve(skb, 4 + (1 << 11) - una);
+ else
+ skb_reserve(skb, 4);
+ } else
+ skb_reserve(skb, 4);
+
+bail:
+ return skb;
+}
+
+/**
+ * ipath_rcv_layer - receive a packet for the layered (ethernet) driver
+ * @dd: the infinipath device
+ * @etail: the sk_buff number
+ * @tlen: the total packet length
+ * @hdr: the ethernet header
+ *
+ * Separate routine for better overall optimization
+ */
+static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
+ u32 tlen, struct ether_header *hdr)
+{
+ u32 elen;
+ u8 pad, *bthbytes;
+ struct sk_buff *skb, *nskb;
+
+ if (dd->ipath_port0_skbs && hdr->sub_opcode == OPCODE_ENCAP) {
+ /*
+ * Allocate a new sk_buff to replace the one we give
+ * to the network stack.
+ */
+ nskb = ipath_alloc_skb(dd, GFP_ATOMIC);
+ if (!nskb) {
+ /* count OK packets that we drop */
+ ipath_stats.sps_krdrops++;
+ return;
+ }
+
+ bthbytes = (u8 *) hdr->bth;
+ pad = (bthbytes[1] >> 4) & 3;
+ /* +CRC32 */
+ elen = tlen - (sizeof(*hdr) + pad + sizeof(u32));
+
+ skb = dd->ipath_port0_skbs[etail];
+ dd->ipath_port0_skbs[etail] = nskb;
+ skb_put(skb, elen);
+
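+ /*
+ * Point this eager ring entry at the replacement skb's buffer,
+ * then hand the filled skb up to the layered driver.
+ */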
+ dd->ipath_f_put_tid(dd, etail + (u64 __iomem *)
+ ((char __iomem *) dd->ipath_kregbase
+ + dd->ipath_rcvegrbase), 0,
+ virt_to_phys(nskb->data));
+
+ __ipath_layer_rcv(dd, hdr, skb);
+
+ /* another ether packet received */
+ ipath_stats.sps_ether_rpkts++;
+ }
+ else if (hdr->sub_opcode == OPCODE_LID_ARP)
+ __ipath_layer_rcv_lid(dd, hdr);
+}
+
+/*
+ * ipath_kreceive - receive a packet
+ * @dd: the infinipath device
+ *
+ * called from interrupt handler for errors or receive interrupt
+ */
+void ipath_kreceive(struct ipath_devdata *dd)
+{
+ u64 *rc;
+ void *ebuf;
+ const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
+ const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
+ u32 etail = -1, l, hdrqtail;
+ struct ips_message_header *hdr;
+ u32 eflags, i, etype, tlen, pkttot = 0;
+ static u64 totcalls; /* stats, may eventually remove */
+ char emsg[128];
+
+ if (!dd->ipath_hdrqtailptr) {
+ ipath_dev_err(dd,
+ "hdrqtailptr not set, can't do receives\n");
+ goto bail;
+ }
+
+ /* There is already a thread processing this queue. */
+ if (test_and_set_bit(0, &dd->ipath_rcv_pending))
+ goto bail;
+
+ if (dd->ipath_port0head ==
+ (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
+ goto done;
+
+gotmore:
+ /*
+ * Read the tail only once at the start. In a flood situation this
+ * helps performance slightly. If more packets arrive while we are
+ * processing, we'll come back here and handle them.
+ */
+ hdrqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
+
+ for (i = 0, l = dd->ipath_port0head; l != hdrqtail; i++) {
+ u32 qp;
+ u8 *bthbytes;
+
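+ /*
+ * l indexes the queue in 32-bit words, so l << 2 is the byte
+ * offset; the per-entry flags are decoded from rc by the
+ * ips_get_*() helpers, and the message header itself starts
+ * at rc[1].
+ */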
+ rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
+ hdr = (struct ips_message_header *)&rc[1];
+ /*
+ * could make a network order version of IPATH_KD_QP, and
+ * do the obvious shift before masking to speed this up.
+ */
+ qp = ntohl(hdr->bth[1]) & 0xffffff;
+ bthbytes = (u8 *) hdr->bth;
+
+ eflags = ips_get_hdr_err_flags((__le32 *) rc);
+ etype = ips_get_rcv_type((__le32 *) rc);
+ /* total length */
+ tlen = ips_get_length_in_bytes((__le32 *) rc);
+ ebuf = NULL;
+ if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
+ /*
+ * it turns out that the chip uses an eager buffer
+ * for all non-expected packets, whether it "needs"
+ * one or not. So always get the index, but don't
+ * set ebuf (so we try to copy data) unless the
+ * length requires it.
+ */
+ etail = ips_get_index((__le32 *) rc);
+ if (tlen > sizeof(*hdr) ||
+ etype == RCVHQ_RCV_TYPE_NON_KD)
+ ebuf = ipath_get_egrbuf(dd, etail, 0);
+ }
+
+ /*
+ * both tiderr and ipathhdrerr are set for all plain IB
+ * packets; only ipathhdrerr should be set.
+ */
+
+ if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
+ RCVHQ_RCV_TYPE_ERROR && ips_get_ipath_ver(
+ hdr->iph.ver_port_tid_offset) !=
+ IPS_PROTO_VERSION) {
+ ipath_cdbg(PKT, "Bad InfiniPath protocol version "
+ "%x\n", etype);
+ }
+
+ if (eflags & ~(INFINIPATH_RHF_H_TIDERR |
+ INFINIPATH_RHF_H_IHDRERR)) {
+ get_rhf_errstring(eflags, emsg, sizeof emsg);
+ ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
+ "tlen=%x opcode=%x egridx=%x: %s\n",
+ eflags, l, etype, tlen, bthbytes[0],
+ ips_get_index((__le32 *) rc), emsg);
+ } else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
+ int ret = __ipath_verbs_rcv(dd, rc + 1,
+ ebuf, tlen);
+ if (ret == -ENODEV)
+ ipath_cdbg(VERBOSE,
+ "received IB packet, "
+ "not SMA (QP=%x)\n", qp);
+ } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
+ if (qp == IPATH_KD_QP &&
+ bthbytes[0] == ipath_layer_rcv_opcode &&
+ ebuf)
+ ipath_rcv_layer(dd, etail, tlen,
+ (struct ether_header *)hdr);
+ else
+ ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
+ "qp=%x), len %x; ignored\n",
+ etype, bthbytes[0], qp, tlen);
+ }
+ else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
+ ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
+ be32_to_cpu(hdr->bth[0]) & 0xff);
+ else if (eflags & (INFINIPATH_RHF_H_TIDERR |
+ INFINIPATH_RHF_H_IHDRERR)) {
+ /*
+ * This is a type 3 packet, only the LRH is in the
+ * rcvhdrq, the rest of the header is in the eager
+ * buffer.
+ */
+ u8 opcode;
+ if (ebuf) {
+ bthbytes = (u8 *) ebuf;
+ opcode = *bthbytes;
+ }
+ else
+ opcode = 0;
+ get_rhf_errstring(eflags, emsg, sizeof emsg);
+ ipath_dbg("Err %x (%s), opcode %x, egrbuf %x, "
+ "len %x\n", eflags, emsg, opcode, etail,
+ tlen);
+ } else {
+ /*
+ * error packet, type of error unknown.
+ * Probably type 3, but we don't know, so don't
+ * even try to print the opcode, etc.
+ */
+ ipath_dbg("Error Pkt, but no eflags! egrbuf %x, "
+ "len %x\nhdrq@%lx;hdrq+%x rhf: %llx; "
+ "hdr %llx %llx %llx %llx %llx\n",
+ etail, tlen, (unsigned long) rc, l,
+ (unsigned long long) rc[0],
+ (unsigned long long) rc[1],
+ (unsigned long long) rc[2],
+ (unsigned long long) rc[3],
+ (unsigned long long) rc[4],
+ (unsigned long long) rc[5]);
+ }
+ l += rsize;
+ if (l >= maxcnt)
+ l = 0;
+ /*
+ * update for each packet, to help prevent overflows if we
+ * have lots of packets.
+ */
+ (void)ipath_write_ureg(dd, ur_rcvhdrhead,
+ dd->ipath_rhdrhead_intr_off | l, 0);
+ if (etype != RCVHQ_RCV_TYPE_EXPECTED)
+ (void)ipath_write_ureg(dd, ur_rcvegrindexhead,
+ etail, 0);
+ }
+
+ pkttot += i;
+
+ dd->ipath_port0head = l;
+
+ if (hdrqtail != (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
+ /* more arrived while we handled first batch */
+ goto gotmore;
+
+ if (pkttot > ipath_stats.sps_maxpkts_call)
+ ipath_stats.sps_maxpkts_call = pkttot;
+ ipath_stats.sps_port0pkts += pkttot;
+ ipath_stats.sps_avgpkts_call =
+ ipath_stats.sps_port0pkts / ++totcalls;
+
+done:
+ clear_bit(0, &dd->ipath_rcv_pending);
+ smp_mb__after_clear_bit();
+
+bail:;
+}
+
+/**
+ * ipath_update_pio_bufs - update shadow copy of the PIO availability map
+ * @dd: the infinipath device
+ *
+ * Called whenever our local copy indicates we have run out of send buffers.
+ * NOTE: This can be called from interrupt context by some code
+ * and from non-interrupt context by ipath_getpiobuf().
+ */
+
+static void ipath_update_pio_bufs(struct ipath_devdata *dd)
+{
+ unsigned long flags;
+ int i;
+ const unsigned piobregs = (unsigned)dd->ipath_pioavregs;
+
+ /* If the generation (check) bits have changed, then we update the
+ * busy bit for the corresponding PIO buffer. This algorithm will
+ * modify positions to the value they already have in some cases
+ * (i.e., no change), but it's faster than changing only the bits
+ * that have changed.
+ *
+ * We would like to do this atomically, to avoid spinlocks in the
+ * critical send path, but that's not really possible, given the
+ * type of changes, and that this routine could be called on
+ * multiple CPUs simultaneously, so we lock in this routine only,
+ * to avoid conflicting updates; all we change is the shadow, and
+ * it's a single 64-bit memory location, so by definition the update
+ * is atomic in terms of what other CPUs can see when testing the
+ * bits. The spin_lock overhead isn't too bad, since it only
+ * happens when all buffers are in use, so only cpu overhead, not
+ * latency or bandwidth is affected.
+ */
+#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL
+ if (!dd->ipath_pioavailregs_dma) {
+ ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
+ return;
+ }
+ if (ipath_debug & __IPATH_VERBDBG) {
+ /* only if packet debug and verbose */
+ volatile __le64 *dma = dd->ipath_pioavailregs_dma;
+ unsigned long *shadow = dd->ipath_pioavailshadow;
+
+ ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
+ "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
+ "s3=%lx\n",
+ (unsigned long long) le64_to_cpu(dma[0]),
+ shadow[0],
+ (unsigned long long) le64_to_cpu(dma[1]),
+ shadow[1],
+ (unsigned long long) le64_to_cpu(dma[2]),
+ shadow[2],
+ (unsigned long long) le64_to_cpu(dma[3]),
+ shadow[3]);
+ if (piobregs > 4)
+ ipath_cdbg(
+ PKT, "2nd group, dma4=%llx shad4=%lx, "
+ "d5=%llx s5=%lx, d6=%llx s6=%lx, "
+ "d7=%llx s7=%lx\n",
+ (unsigned long long) le64_to_cpu(dma[4]),
+ shadow[4],
+ (unsigned long long) le64_to_cpu(dma[5]),
+ shadow[5],
+ (unsigned long long) le64_to_cpu(dma[6]),
+ shadow[6],
+ (unsigned long long) le64_to_cpu(dma[7]),
+ shadow[7]);
+ }
+ spin_lock_irqsave(&ipath_pioavail_lock, flags);
+ for (i = 0; i < piobregs; i++) {
+ u64 pchbusy, pchg, piov, pnew;
+ /*
+ * Chip Errata: bug 6641; even and odd qwords>3 are swapped
+ */
+ if (i > 3) {
+ if (i & 1)
+ piov = le64_to_cpu(
+ dd->ipath_pioavailregs_dma[i - 1]);
+ else
+ piov = le64_to_cpu(
+ dd->ipath_pioavailregs_dma[i + 1]);
+ } else
+ piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
+ pchg = _IPATH_ALL_CHECKBITS &
+ ~(dd->ipath_pioavailshadow[i] ^ piov);
+ pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
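+ /*
+ * pchg selects the check (generation) bits that read the same in
+ * the DMA copy as in the shadow; pchbusy is the busy-bit position
+ * for each such buffer. If any of those buffers is still marked
+ * busy in the shadow, take their busy bits from the DMA copy.
+ */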
+ if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
+ pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
+ pnew |= piov & pchbusy;
+ dd->ipath_pioavailshadow[i] = pnew;
+ }
+ }
+ spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
+}
+
+/**
+ * ipath_setrcvhdrsize - set the receive header size
+ * @dd: the infinipath device
+ * @rhdrsize: the receive header size
+ *
+ * called from user init code, and also layered driver init
+ */
+int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
+{
+ int ret = 0;
+
+ if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
+ if (dd->ipath_rcvhdrsize != rhdrsize) {
+ dev_info(&dd->pcidev->dev,
+ "Error: can't set protocol header "
+ "size %u, already %u\n",
+ rhdrsize, dd->ipath_rcvhdrsize);
+ ret = -EAGAIN;
+ } else
+ ipath_cdbg(VERBOSE, "Reuse same protocol header "
+ "size %u\n", dd->ipath_rcvhdrsize);
+ } else if (rhdrsize > (dd->ipath_rcvhdrentsize -
+ (sizeof(u64) / sizeof(u32)))) {
+ ipath_dbg("Error: can't set protocol header size %u "
+ "(> max %u)\n", rhdrsize,
+ dd->ipath_rcvhdrentsize -
+ (u32) (sizeof(u64) / sizeof(u32)));
+ ret = -EOVERFLOW;
+ } else {
+ dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
+ dd->ipath_rcvhdrsize = rhdrsize;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
+ dd->ipath_rcvhdrsize);
+ ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
+ dd->ipath_rcvhdrsize);
+ }
+ return ret;
+}
+
+/**
+ * ipath_getpiobuf - find an available pio buffer
+ * @dd: the infinipath device
+ * @pbufnum: the buffer number is placed here
+ *
+ * Do appropriate marking as busy, etc.
+ * Returns a pointer to the buffer on success, or NULL if none is
+ * available; the buffer number is returned in *pbufnum.
+ * Used by ipath_sma_send_pkt and ipath_layer_send
+ */
+u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
+{
+ int i, j, starti, updated = 0;
+ unsigned piobcnt, iter;
+ unsigned long flags;
+ unsigned long *shadow = dd->ipath_pioavailshadow;
+ u32 __iomem *buf;
+
+ piobcnt = (unsigned)(dd->ipath_piobcnt2k
+ + dd->ipath_piobcnt4k);
+ starti = dd->ipath_lastport_piobuf;
+ iter = piobcnt - starti;
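+ /*
+ * Scan at most iter buffers, starting either at starti
+ * (ipath_lastport_piobuf) or where the last search left off,
+ * wrapping back to starti when we run off the end.
+ */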
+ if (dd->ipath_upd_pio_shadow) {
+ /*
+ * Minor optimization. If we had no buffers on last call,
+ * start out by doing the update; continue and do the scan even
+ * if no buffers were updated, to be paranoid.
+ */
+ ipath_update_pio_bufs(dd);
+ /* we scanned here, don't do it at end of scan */
+ updated = 1;
+ i = starti;
+ } else
+ i = dd->ipath_lastpioindex;
+
+rescan:
+ /*
+ * while test_and_set_bit() is atomic, we do that and then the
+ * change_bit(), and the pair is not. See if this is the cause
+ * of the remaining armlaunch errors.
+ */
+ spin_lock_irqsave(&ipath_pioavail_lock, flags);
+ for (j = 0; j < iter; j++, i++) {
+ if (i >= piobcnt)
+ i = starti;
+ /*
+ * To avoid bus lock overhead, we first find a candidate
+ * buffer, then do the test and set, and continue if that
+ * fails.
+ */
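+ /*
+ * Shadow layout: bit 2 * i is buffer i's generation (check)
+ * bit, bit 2 * i + 1 is its busy bit; claim the buffer by
+ * setting busy, then flip the generation bit below.
+ */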
+ if (test_bit((2 * i) + 1, shadow) ||
+ test_and_set_bit((2 * i) + 1, shadow))
+ continue;
+ /* flip generation bit */
+ change_bit(2 * i, shadow);
+ break;
+ }
+ spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
+
+ if (j == iter) {
+ volatile __le64 *dma = dd->ipath_pioavailregs_dma;
+
+ /*
+ * First time through: the shadow is exhausted, but there may be
+ * real buffers available, so go see; if any were updated,
+ * rescan (once).
+ */
+ if (!updated) {
+ ipath_update_pio_bufs(dd);
+ updated = 1;
+ i = starti;
+ goto rescan;
+ }
+ dd->ipath_upd_pio_shadow = 1;
+ /*
+ * not atomic, but if we lose one once in a while, that's OK
+ */
+ ipath_stats.sps_nopiobufs++;
+ if (!(++dd->ipath_consec_nopiobuf % 100000)) {
+ ipath_dbg(
+ "%u pio sends with no bufavail; dmacopy: "
+ "%llx %llx %llx %llx; shadow: "
+ "%lx %lx %lx %lx\n",
+ dd->ipath_consec_nopiobuf,
+ (unsigned long long) le64_to_cpu(dma[0]),
+ (unsigned long long) le64_to_cpu(dma[1]),
+ (unsigned long long) le64_to_cpu(dma[2]),
+ (unsigned long long) le64_to_cpu(dma[3]),
+ shadow[0], shadow[1], shadow[2],
+ shadow[3]);
+ /*
+ * 4 buffers per byte, 4 registers above, cover rest
+ * below
+ */
+ if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
+ (sizeof(shadow[0]) * 4 * 4))
+ ipath_dbg("2nd group: dmacopy: %llx %llx "
+ "%llx %llx; shadow: %lx %lx "
+ "%lx %lx\n",
+ (unsigned long long)
+ le64_to_cpu(dma[4]),
+ (unsigned long long)
+ le64_to_cpu(dma[5]),
+ (unsigned long long)
+ le64_to_cpu(dma[6]),
+ (unsigned long long)
+ le64_to_cpu(dma[7]),
+ shadow[4], shadow[5],
+ shadow[6], shadow[7]);
+ }
+ buf = NULL;
+ goto bail;
+ }
+
+ if (updated)
+ /*
+ * We had run out of bufs; now some (at least the one we just
+ * got) are available again, so tell the layered driver.
+ */
+ __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
+
+ /*
+ * set next starting place. Since it's just an optimization,
+ * it doesn't matter who wins on this, so no locking
+ */
+ dd->ipath_lastpioindex = i + 1;
+ if (dd->ipath_upd_pio_shadow)
+ dd->ipath_upd_pio_shadow = 0;
+ if (dd->ipath_consec_nopiobuf)
+ dd->ipath_consec_nopiobuf = 0;
+ if (i < dd->ipath_piobcnt2k)
+ buf = (u32 __iomem *) (dd->ipath_pio2kbase +
+ i * dd->ipath_palign);
+ else
+ buf = (u32 __iomem *)
+ (dd->ipath_pio4kbase +
+ (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
+ ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
+ i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
+ if (pbufnum)
+ *pbufnum = i;
+
+bail:
+ return buf;
+}
+
+/**
+ * ipath_create_rcvhdrq - create a receive header queue
+ * @dd: the infinipath device
+ * @pd: the port data
+ *
+ * this *must* be physically contiguous memory, and for now,
+ * that limits it to what kmalloc can do.
+ */
+int ipath_create_rcvhdrq(struct ipath_devdata *dd,
+ struct ipath_portdata *pd)
+{
+ int ret = 0, amt;
+
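+ /*
+ * Queue size in bytes: rcvhdrcnt entries of rcvhdrentsize 32-bit
+ * words each, rounded up to a whole page.
+ */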
+ amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
+ sizeof(u32), PAGE_SIZE);
+ if (!pd->port_rcvhdrq) {
+ /*
+ * not using REPEAT isn't viable; at 128KB, we can easily
+ * fail this. The problem with REPEAT is we can block here
+ * "forever". There isn't an inbetween, unfortunately. We
+ * could reduce the risk by never freeing the rcvhdrq except
+ * at unload, but even then, the first time a port is used,
+ * we could delay for some time...
+ */
+ gfp_t gfp_flags = GFP_USER | __GFP_COMP;
+
+ pd->port_rcvhdrq = dma_alloc_coherent(
+ &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
+ gfp_flags);
+
+ if (!pd->port_rcvhdrq) {
+ ipath_dev_err(dd, "attempt to allocate %d bytes "
+ "for port %u rcvhdrq failed\n",
+ amt, pd->port_port);
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ pd->port_rcvhdrq_size = amt;
+
+ ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
+ "for port %u rcvhdr Q\n",
+ amt >> PAGE_SHIFT, pd->port_rcvhdrq,
+ (unsigned long) pd->port_rcvhdrq_phys,
+ (unsigned long) pd->port_rcvhdrq_size,
+ pd->port_port);
+ } else {
+ /*
+ * clear for security, sanity, and/or debugging, each
+ * time we reuse
+ */
+ memset(pd->port_rcvhdrq, 0, amt);
+ }
+
+ /*
+ * tell chip each time we init it, even if we are re-using previous
+ * memory (we zero it at process close)
+ */
+ ipath_cdbg(VERBOSE, "writing port %d rcvhdraddr as %lx\n",
+ pd->port_port, (unsigned long) pd->port_rcvhdrq_phys);
+ ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
+ pd->port_port, pd->port_rcvhdrq_phys);
+
+ ret = 0;
+bail:
+ return ret;
+}
+
+int ipath_waitfor_complete(struct ipath_devdata *dd, ipath_kreg reg_id,
+ u64 bits_to_wait_for, u64 * valp)
+{
+ unsigned long timeout;
+ u64 lastval, val;
+ int ret;
+
+ lastval = ipath_read_kreg64(dd, reg_id);
+ /* wait a ridiculously long time */
+ timeout = jiffies + msecs_to_jiffies(5);
+ do {
+ val = ipath_read_kreg64(dd, reg_id);
+ /* set so they have something, even on failures. */
+ *valp = val;
+ if ((val & bits_to_wait_for) == bits_to_wait_for) {
+ ret = 0;
+ break;
+ }
+ if (val != lastval)
+ ipath_cdbg(VERBOSE, "Changed from %llx to %llx, "
+ "waiting for %llx bits\n",
+ (unsigned long long) lastval,
+ (unsigned long long) val,
+ (unsigned long long) bits_to_wait_for);
+ cond_resched();
+ if (time_after(jiffies, timeout)) {
+ ipath_dbg("Didn't get bits %llx in register 0x%x, "
+ "got %llx\n",
+ (unsigned long long) bits_to_wait_for,
+ reg_id, (unsigned long long) *valp);
+ ret = -ENODEV;
+ break;
+ }
+ } while (1);
+
+ return ret;
+}
+
+/**
+ * ipath_waitfor_mdio_cmdready - wait for last command to complete
+ * @dd: the infinipath device
+ *
+ * Like ipath_waitfor_complete(), but we wait for the CMDVALID bit to go
+ * away, indicating the last command has completed. It doesn't return data.
+ */
+int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
+{
+ unsigned long timeout;
+ u64 val;
+ int ret;
+
+ /* wait a ridiculously long time */
+ timeout = jiffies + msecs_to_jiffies(5);
+ do {
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_mdio);
+ if (!(val & IPATH_MDIO_CMDVALID)) {
+ ret = 0;
+ break;
+ }
+ cond_resched();
+ if (time_after(jiffies, timeout)) {
+ ipath_dbg("CMDVALID stuck in mdio reg? (%llx)\n",
+ (unsigned long long) val);
+ ret = -ENODEV;
+ break;
+ }
+ } while (1);
+
+ return ret;
+}
+
+void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
+{
+ static const char *what[4] = {
+ [0] = "DOWN",
+ [INFINIPATH_IBCC_LINKCMD_INIT] = "INIT",
+ [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
+ [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
+ };
+ ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate "
+ "is %s\n", dd->ipath_unit,
+ what[(which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
+ INFINIPATH_IBCC_LINKCMD_MASK],
+ ipath_ibcstatus_str[
+ (ipath_read_kreg64
+ (dd, dd->ipath_kregs->kr_ibcstatus) >>
+ INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
+ INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
+
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ dd->ipath_ibcctrl | which);
+}
+
+/**
+ * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register
+ * @dd: the infinipath device
+ * @regno: the register number to read
+ * @port: the port containing the register
+ *
+ * Registers that vary with the chip implementation constants (port)
+ * use this routine.
+ */
+u64 ipath_read_kreg64_port(const struct ipath_devdata *dd, ipath_kreg regno,
+ unsigned port)
+{
+ u16 where;
+
+ if (port < dd->ipath_portcnt &&
+ (regno == dd->ipath_kregs->kr_rcvhdraddr ||
+ regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
+ where = regno + port;
+ else
+ where = -1;
+
+ return ipath_read_kreg64(dd, where);
+}
+
+/**
+ * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
+ * @dd: the infinipath device
+ * @regno: the register number to write
+ * @port: the port containing the register
+ * @value: the value to write
+ *
+ * Registers that vary with the chip implementation constants (port)
+ * use this routine.
+ */
+void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
+ unsigned port, u64 value)
+{
+ u16 where;
+
+ if (port < dd->ipath_portcnt &&
+ (regno == dd->ipath_kregs->kr_rcvhdraddr ||
+ regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
+ where = regno + port;
+ else
+ where = -1;
+
+ ipath_write_kreg(dd, where, value);
+}
+
+/**
+ * ipath_shutdown_device - shut down a device
+ * @dd: the infinipath device
+ *
+ * This is called to make the device quiet when we are about to
+ * unload the driver, and also when the device is administratively
+ * disabled. It does not free any data structures.
+ * Everything it does has to be set up again by ipath_init_chip(dd, 1).
+ */
+void ipath_shutdown_device(struct ipath_devdata *dd)
+{
+ u64 val;
+
+ ipath_dbg("Shutting down the device\n");
+
+ dd->ipath_flags |= IPATH_LINKUNK;
+ dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
+ IPATH_LINKINIT | IPATH_LINKARMED |
+ IPATH_LINKACTIVE);
+ *dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
+ IPATH_STATUS_IB_READY);
+
+ /* mask interrupts, but not errors */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
+
+ dd->ipath_rcvctrl = 0;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ dd->ipath_rcvctrl);
+
+ /*
+ * Gracefully stop all sends, allowing any in progress to trickle out
+ * first.
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
+ /* flush it */
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ /*
+ * Wait long enough for anything that's going to trickle out to have
+ * actually done so.
+ */
+ udelay(5);
+
+ /*
+ * abort any armed or launched PIO buffers that didn't go. (self
+ * clearing). Will cause any packet currently being transmitted to
+ * go out with an EBP, and may also cause a short packet error on
+ * the receiver.
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ INFINIPATH_S_ABORT);
+
+ ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
+ INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+
+ /*
+ * We are shutting down, so tell the layered driver. We don't do
+ * this on just a link state change; much like ethernet, a cable
+ * unplug, etc. doesn't change driver state.
+ */
+ ipath_layer_intr(dd, IPATH_LAYER_INT_IF_DOWN);
+
+ /* disable IBC */
+ dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
+ dd->ipath_control);
+
+ /*
+ * Clear SerdesEnable and turn the LEDs off; do this here because
+ * we are unloading, so we can't count on interrupts to move things
+ * along. Turn the LEDs off explicitly for the same reason.
+ */
+ dd->ipath_f_quiet_serdes(dd);
+ dd->ipath_f_setextled(dd, 0, 0);
+
+ if (dd->ipath_stats_timer_active) {
+ del_timer_sync(&dd->ipath_stats_timer);
+ dd->ipath_stats_timer_active = 0;
+ }
+
+ /*
+ * clear all interrupts and errors, so that the next time the driver
+ * is loaded or device is enabled, we know that whatever is set
+ * happened while we were unloaded
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+ ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
+}
+
+/**
+ * ipath_free_pddata - free a port's allocated data
+ * @dd: the infinipath device
+ * @port: the port
+ * @freehdrq: free the port data structure if true
+ *
+ * When closing, free up any allocated data for a port, if the
+ * reference count goes to zero.
+ * Note: this also optionally frees the portdata itself!
+ * Any changes here have to be matched up with the reinit case
+ * of ipath_init_chip(), which calls this routine on reinit after reset.
+ */
+void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq)
+{
+ struct ipath_portdata *pd = dd->ipath_pd[port];
+
+ if (!pd)
+ return;
+ if (freehdrq)
+ /*
+ * only clear and free portdata if we are going to also
+ * release the hdrq, otherwise we leak the hdrq on each
+ * open/close cycle
+ */
+ dd->ipath_pd[port] = NULL;
+ if (freehdrq && pd->port_rcvhdrq) {
+ ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
+ "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
+ (unsigned long) pd->port_rcvhdrq_size);
+ dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
+ pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
+ pd->port_rcvhdrq = NULL;
+ }
+ if (port && pd->port_rcvegrbuf) {
+ /* always free this */
+ if (pd->port_rcvegrbuf) {
+ unsigned e;
+
+ for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
+ void *base = pd->port_rcvegrbuf[e];
+ size_t size = pd->port_rcvegrbuf_size;
+
+ ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
+ "chunk %u/%u\n", base,
+ (unsigned long) size,
+ e, pd->port_rcvegrbuf_chunks);
+ dma_free_coherent(
+ &dd->pcidev->dev, size, base,
+ pd->port_rcvegrbuf_phys[e]);
+ }
+ vfree(pd->port_rcvegrbuf);
+ pd->port_rcvegrbuf = NULL;
+ vfree(pd->port_rcvegrbuf_phys);
+ pd->port_rcvegrbuf_phys = NULL;
+ }
+ pd->port_rcvegrbuf_chunks = 0;
+ } else if (port == 0 && dd->ipath_port0_skbs) {
+ unsigned e;
+ struct sk_buff **skbs = dd->ipath_port0_skbs;
+
+ dd->ipath_port0_skbs = NULL;
+ ipath_cdbg(VERBOSE, "free closed port %d ipath_port0_skbs "
+ "@ %p\n", pd->port_port, skbs);
+ for (e = 0; e < dd->ipath_rcvegrcnt; e++)
+ if (skbs[e])
+ dev_kfree_skb(skbs[e]);
+ vfree(skbs);
+ }
+ if (freehdrq) {
+ kfree(pd->port_tid_pg_list);
+ kfree(pd);
+ }
+}
+
+int __init infinipath_init(void)
+{
+ int ret;
+
+ ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ipath_core_version);
+
+ /*
+ * These must be called before the driver is registered with
+ * the PCI subsystem.
+ */
+ idr_init(&unit_table);
+ if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ ret = pci_register_driver(&ipath_driver);
+ if (ret < 0) {
+ printk(KERN_ERR IPATH_DRV_NAME
+ ": Unable to register driver: error %d\n", -ret);
+ goto bail_unit;
+ }
+
+ ret = ipath_driver_create_group(&ipath_driver.driver);
+ if (ret < 0) {
+ printk(KERN_ERR IPATH_DRV_NAME ": Unable to create driver "
+ "sysfs entries: error %d\n", -ret);
+ goto bail_pci;
+ }
+
+ ret = ipath_init_ipathfs();
+ if (ret < 0) {
+ printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
+ "ipathfs: error %d\n", -ret);
+ goto bail_group;
+ }
+
+ goto bail;
+
+bail_group:
+ ipath_driver_remove_group(&ipath_driver.driver);
+
+bail_pci:
+ pci_unregister_driver(&ipath_driver);
+
+bail_unit:
+ idr_destroy(&unit_table);
+
+bail:
+ return ret;
+}
+
+static void cleanup_device(struct ipath_devdata *dd)
+{
+ int port;
+
+ ipath_shutdown_device(dd);
+
+ if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
+ /* can't do anything more with chip; needs re-init */
+ *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
+ if (dd->ipath_kregbase) {
+ /*
+ * If we haven't already cleaned up, these assignments
+ * ensure that any register reads/writes "fail" until
+ * re-init.
+ */
+ dd->ipath_kregbase = NULL;
+ dd->ipath_kregvirt = NULL;
+ dd->ipath_uregbase = 0;
+ dd->ipath_sregbase = 0;
+ dd->ipath_cregbase = 0;
+ dd->ipath_kregsize = 0;
+ }
+ ipath_disable_wc(dd);
+ }
+
+ if (dd->ipath_pioavailregs_dma) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ (void *) dd->ipath_pioavailregs_dma,
+ dd->ipath_pioavailregs_phys);
+ dd->ipath_pioavailregs_dma = NULL;
+ }
+
+ if (dd->ipath_pageshadow) {
+ struct page **tmpp = dd->ipath_pageshadow;
+ int i, cnt = 0;
+
+ ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
+ "locked\n");
+ for (port = 0; port < dd->ipath_cfgports; port++) {
+ int port_tidbase = port * dd->ipath_rcvtidcnt;
+ int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
+ for (i = port_tidbase; i < maxtid; i++) {
+ if (!tmpp[i])
+ continue;
+ ipath_release_user_pages(&tmpp[i], 1);
+ tmpp[i] = NULL;
+ cnt++;
+ }
+ }
+ if (cnt) {
+ ipath_stats.sps_pageunlocks += cnt;
+ ipath_cdbg(VERBOSE, "There were still %u expTID "
+ "entries locked\n", cnt);
+ }
+ if (ipath_stats.sps_pagelocks ||
+ ipath_stats.sps_pageunlocks)
+ ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
+ "unlocked via ipath_m{un}lock\n",
+ (unsigned long long)
+ ipath_stats.sps_pagelocks,
+ (unsigned long long)
+ ipath_stats.sps_pageunlocks);
+
+ ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
+ dd->ipath_pageshadow);
+ vfree(dd->ipath_pageshadow);
+ dd->ipath_pageshadow = NULL;
+ }
+
+ /*
+ * free any resources still in use (usually just kernel ports)
+ * at unload
+ */
+ for (port = 0; port < dd->ipath_cfgports; port++)
+ ipath_free_pddata(dd, port, 1);
+ kfree(dd->ipath_pd);
+ /*
+ * debuggability, in case some cleanup path tries to use it
+ * after this
+ */
+ dd->ipath_pd = NULL;
+}
+
+static void __exit infinipath_cleanup(void)
+{
+ struct ipath_devdata *dd, *tmp;
+ unsigned long flags;
+
+ ipath_exit_ipathfs();
+
+ ipath_driver_remove_group(&ipath_driver.driver);
+
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+
+ /*
+ * Turn off rcv, send, and interrupts for all ports. Should all
+ * drivers also hard reset the chip here?
+ * Free up port 0 (kernel) rcvhdr and egr bufs, and eventually tid
+ * bufs, for all versions of the driver, if they were allocated.
+ */
+ list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+
+ if (dd->ipath_kregbase)
+ cleanup_device(dd);
+
+ if (dd->pcidev) {
+ if (dd->pcidev->irq) {
+ ipath_cdbg(VERBOSE,
+ "unit %u free_irq of irq %x\n",
+ dd->ipath_unit, dd->pcidev->irq);
+ free_irq(dd->pcidev->irq, dd);
+ } else
+ ipath_dbg("irq is 0, not doing free_irq "
+ "for unit %u\n", dd->ipath_unit);
+ dd->pcidev = NULL;
+ }
+
+ /*
+ * we check for NULL here, because it's outside the kregbase
+ * check, and we need to call it after the free_irq. Thus
+ * it's possible that the function pointers were never
+ * initialized.
+ */
+ if (dd->ipath_f_cleanup)
+ /* clean up chip-specific stuff */
+ dd->ipath_f_cleanup(dd);
+
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+
+ ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
+ pci_unregister_driver(&ipath_driver);
+
+ idr_destroy(&unit_table);
+}
+
+/**
+ * ipath_reset_device - reset the chip if possible
+ * @unit: the device to reset
+ *
+ * Whether or not reset is successful, we attempt to re-initialize the chip
+ * (that is, much like a driver unload/reload). We clear the INITTED flag
+ * so that the various entry points will fail until we reinitialize. For
+ * now, we only allow this if no user ports are open that use chip resources
+ */
+int ipath_reset_device(int unit)
+{
+ int ret, i;
+ struct ipath_devdata *dd = ipath_lookup(unit);
+
+ if (!dd) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
+
+ if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
+ dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
+ "not initialized or not present\n", unit);
+ ret = -ENXIO;
+ goto bail;
+ }
+
+ if (dd->ipath_pd)
+ for (i = 1; i < dd->ipath_portcnt; i++) {
+ if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
+ ipath_dbg("unit %u port %d is in use "
+ "(PID %u cmd %s), can't reset\n",
+ unit, i,
+ dd->ipath_pd[i]->port_pid,
+ dd->ipath_pd[i]->port_comm);
+ ret = -EBUSY;
+ goto bail;
+ }
+ }
+
+ dd->ipath_flags &= ~IPATH_INITTED;
+ ret = dd->ipath_f_reset(dd);
+ if (ret != 1)
+ ipath_dbg("reset was not successful\n");
+ ipath_dbg("Trying to reinitialize unit %u after reset attempt\n",
+ unit);
+ ret = ipath_init_chip(dd, 1);
+ if (ret)
+ ipath_dev_err(dd, "Reinitialize unit %u after "
+ "reset failed with %d\n", unit, ret);
+ else
+ dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
+ "resetting\n", unit);
+
+bail:
+ return ret;
+}
+
+module_init(infinipath_init);
+module_exit(infinipath_cleanup);
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
new file mode 100644
index 0000000000000..f11a900e8cd7d
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "ipath_kernel.h"
+
+/*
+ * InfiniPath I2C driver for a serial eeprom. This is not a generic
+ * I2C interface. For a start, the device we're using (Atmel AT24C11)
+ * doesn't work like a regular I2C device. It looks like one
+ * electrically, but not logically. Normal I2C devices have a single
+ * 7-bit or 10-bit I2C address that they respond to. Valid 7-bit
+ * addresses range from 0x03 to 0x77. Addresses 0x00 to 0x02 and 0x78
+ * to 0x7F are special reserved addresses (e.g. 0x00 is the "general
+ * call" address.) The Atmel device, on the other hand, responds to ALL
+ * 7-bit addresses. It's designed to be the only device on a given I2C
+ * bus. A 7-bit address corresponds to the memory address within the
+ * Atmel device itself.
+ *
+ * Also, the timing requirements mean more than simple software
+ * bitbanging, with readbacks from chip to ensure timing (simple udelay
+ * is not enough).
+ *
+ * This all means that accessing the device is specialized enough
+ * that using the standard kernel I2C bitbanging interface would be
+ * impossible. For example, the core I2C eeprom driver expects to find
+ * a device at one or more of a limited set of addresses only. It doesn't
+ * allow writing to an eeprom. It also doesn't provide any means of
+ * accessing eeprom contents from within the kernel, only via sysfs.
+ */
+
+enum i2c_type {
+ i2c_line_scl = 0,
+ i2c_line_sda
+};
+
+enum i2c_state {
+ i2c_line_low = 0,
+ i2c_line_high
+};
+
+#define READ_CMD 1
+#define WRITE_CMD 0
+
+static int eeprom_init;
+
+/*
+ * The gpioval manipulation really should be protected by spinlocks
+ * or be converted to use atomic operations.
+ */
+
+/**
+ * i2c_gpio_set - set a GPIO line
+ * @dd: the infinipath device
+ * @line: the line to set
+ * @new_line_state: the state to set
+ *
+ * Returns 0 if the line was set to the new state successfully, non-zero
+ * on error.
+ */
+static int i2c_gpio_set(struct ipath_devdata *dd,
+ enum i2c_type line,
+ enum i2c_state new_line_state)
+{
+ u64 read_val, write_val, mask, *gpioval;
+
+ gpioval = &dd->ipath_gpio_out;
+ read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
+ if (line == i2c_line_scl)
+ mask = ipath_gpio_scl;
+ else
+ mask = ipath_gpio_sda;
+
+ if (new_line_state == i2c_line_high)
+ /* tri-state the output rather than force high */
+ write_val = read_val & ~mask;
+ else
+ /* config line to be an output */
+ write_val = read_val | mask;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, write_val);
+
+ /* set high and verify */
+ if (new_line_state == i2c_line_high)
+ write_val = 0x1UL;
+ else
+ write_val = 0x0UL;
+
+ if (line == i2c_line_scl) {
+ write_val <<= ipath_gpio_scl_num;
+ *gpioval = *gpioval & ~(1UL << ipath_gpio_scl_num);
+ *gpioval |= write_val;
+ } else {
+ write_val <<= ipath_gpio_sda_num;
+ *gpioval = *gpioval & ~(1UL << ipath_gpio_sda_num);
+ *gpioval |= write_val;
+ }
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_out, *gpioval);
+
+ return 0;
+}
+
+/**
+ * i2c_gpio_get - get a GPIO line state
+ * @dd: the infinipath device
+ * @line: the line to get
+ * @curr_statep: where to put the line state
+ *
+ * Returns 0 if the line state was read successfully, non-zero
+ * on error. *curr_statep is not set on error.
+ */
+static int i2c_gpio_get(struct ipath_devdata *dd,
+ enum i2c_type line,
+ enum i2c_state *curr_statep)
+{
+ u64 read_val, write_val, mask;
+ int ret;
+
+ /* check args */
+ if (curr_statep == NULL) {
+ ret = 1;
+ goto bail;
+ }
+
+ read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
+ /* config line to be an input */
+ if (line == i2c_line_scl)
+ mask = ipath_gpio_scl;
+ else
+ mask = ipath_gpio_sda;
+ write_val = read_val & ~mask;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, write_val);
+ read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
+
+ if (read_val & mask)
+ *curr_statep = i2c_line_high;
+ else
+ *curr_statep = i2c_line_low;
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * i2c_wait_for_writes - wait for a write
+ * @dd: the infinipath device
+ *
+ * We use this instead of udelay directly, so we can make sure
+ * that previous register writes have been flushed all the way
+ * to the chip. Since we are delaying anyway, the cost doesn't
+ * hurt, and it makes the bit twiddling more regular.
+ */
+static void i2c_wait_for_writes(struct ipath_devdata *dd)
+{
+ (void)ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+}
+
+static void scl_out(struct ipath_devdata *dd, u8 bit)
+{
+ i2c_gpio_set(dd, i2c_line_scl, bit ? i2c_line_high : i2c_line_low);
+
+ i2c_wait_for_writes(dd);
+}
+
+static void sda_out(struct ipath_devdata *dd, u8 bit)
+{
+ i2c_gpio_set(dd, i2c_line_sda, bit ? i2c_line_high : i2c_line_low);
+
+ i2c_wait_for_writes(dd);
+}
+
+static u8 sda_in(struct ipath_devdata *dd, int wait)
+{
+ enum i2c_state bit;
+
+ if (i2c_gpio_get(dd, i2c_line_sda, &bit))
+ ipath_dbg("get bit failed!\n");
+
+ if (wait)
+ i2c_wait_for_writes(dd);
+
+ return bit == i2c_line_high ? 1U : 0;
+}
+
+/**
+ * i2c_ackrcv - see if ack following write is true
+ * @dd: the infinipath device
+ */
+static int i2c_ackrcv(struct ipath_devdata *dd)
+{
+ u8 ack_received;
+
+ /* AT ENTRY SCL = LOW */
+ /* change direction, ignore data */
+ ack_received = sda_in(dd, 1);
+ scl_out(dd, i2c_line_high);
+ ack_received = sda_in(dd, 1) == 0;
+ scl_out(dd, i2c_line_low);
+ return ack_received;
+}
+
+/**
+ * wr_byte - write a byte, one bit at a time
+ * @dd: the infinipath device
+ * @data: the byte to write
+ *
+ * Returns 0 if we got the following ack, otherwise 1
+ */
+static int wr_byte(struct ipath_devdata *dd, u8 data)
+{
+ int bit_cntr;
+ u8 bit;
+
+ for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
+ bit = (data >> bit_cntr) & 1;
+ sda_out(dd, bit);
+ scl_out(dd, i2c_line_high);
+ scl_out(dd, i2c_line_low);
+ }
+ return (!i2c_ackrcv(dd)) ? 1 : 0;
+}
+
+static void send_ack(struct ipath_devdata *dd)
+{
+ sda_out(dd, i2c_line_low);
+ scl_out(dd, i2c_line_high);
+ scl_out(dd, i2c_line_low);
+ sda_out(dd, i2c_line_high);
+}
+
+/**
+ * i2c_startcmd - transmit the start condition, followed by address/cmd
+ * @dd: the infinipath device
+ * @offset_dir: direction byte
+ *
+ * (both clock/data high, clock high, data low while clock is high)
+ */
+static int i2c_startcmd(struct ipath_devdata *dd, u8 offset_dir)
+{
+ int res;
+
+ /* issue start sequence */
+ sda_out(dd, i2c_line_high);
+ scl_out(dd, i2c_line_high);
+ sda_out(dd, i2c_line_low);
+ scl_out(dd, i2c_line_low);
+
+ /* issue length and direction byte */
+ res = wr_byte(dd, offset_dir);
+
+ if (res)
+ ipath_cdbg(VERBOSE, "No ack to complete start\n");
+
+ return res;
+}
+
+/**
+ * stop_cmd - transmit the stop condition
+ * @dd: the infinipath device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_cmd(struct ipath_devdata *dd)
+{
+ scl_out(dd, i2c_line_low);
+ sda_out(dd, i2c_line_low);
+ scl_out(dd, i2c_line_high);
+ sda_out(dd, i2c_line_high);
+ udelay(2);
+}
+
+/**
+ * eeprom_reset - reset I2C communication
+ * @dd: the infinipath device
+ */
+
+static int eeprom_reset(struct ipath_devdata *dd)
+{
+ int clock_cycles_left = 9;
+ u64 *gpioval = &dd->ipath_gpio_out;
+ int ret;
+
+ eeprom_init = 1;
+ *gpioval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_out);
+ ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
+ "is %llx\n", (unsigned long long) *gpioval);
+
+ /*
+ * This is to get the i2c into a known state, by first going low,
+ * then tristate sda (and then tristate scl as first thing
+ * in loop)
+ */
+ scl_out(dd, i2c_line_low);
+ sda_out(dd, i2c_line_high);
+
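+ /*
+ * Clock SCL up to 9 times; as soon as SDA reads back high the
+ * data line has been released and we are done. If SDA never goes
+ * high, report failure (non-zero).
+ */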
+ while (clock_cycles_left--) {
+ scl_out(dd, i2c_line_high);
+
+ if (sda_in(dd, 0)) {
+ sda_out(dd, i2c_line_low);
+ scl_out(dd, i2c_line_low);
+ ret = 0;
+ goto bail;
+ }
+
+ scl_out(dd, i2c_line_low);
+ }
+
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_eeprom_read - receives bytes from the eeprom via I2C
+ * @dd: the infinipath device
+ * @eeprom_offset: address to read from
+ * @buffer: where to store result
+ * @len: number of bytes to receive
+ */
+
+int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
+ void *buffer, int len)
+{
+ /* compiler complains unless initialized */
+ u8 single_byte = 0;
+ int bit_cntr;
+ int ret;
+
+ if (!eeprom_init)
+ eeprom_reset(dd);
+
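+ /*
+ * For this device the 7-bit "address" is really the memory
+ * offset; shift it up and set the R/W bit to form the start byte.
+ */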
+ eeprom_offset = (eeprom_offset << 1) | READ_CMD;
+
+ if (i2c_startcmd(dd, eeprom_offset)) {
+ ipath_dbg("Failed startcmd\n");
+ stop_cmd(dd);
+ ret = 1;
+ goto bail;
+ }
+
+ /*
+ * eeprom keeps clocking data out as long as we ack, automatically
+ * incrementing the address.
+ */
+ while (len-- > 0) {
+ /* get data */
+ single_byte = 0;
+ for (bit_cntr = 8; bit_cntr; bit_cntr--) {
+ u8 bit;
+ scl_out(dd, i2c_line_high);
+ bit = sda_in(dd, 0);
+ single_byte |= bit << (bit_cntr - 1);
+ scl_out(dd, i2c_line_low);
+ }
+
+ /* send ack if not the last byte */
+ if (len)
+ send_ack(dd);
+
+ *((u8 *) buffer) = single_byte;
+ buffer++;
+ }
+
+ stop_cmd(dd);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_eeprom_write - writes data to the eeprom via I2C
+ * @dd: the infinipath device
+ * @eeprom_offset: where to place data
+ * @buffer: data to write
+ * @len: number of bytes to write
+ */
+int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
+ const void *buffer, int len)
+{
+ u8 single_byte;
+ int sub_len;
+ const u8 *bp = buffer;
+ int max_wait_time, i;
+ int ret;
+
+ if (!eeprom_init)
+ eeprom_reset(dd);
+
+ while (len > 0) {
+ if (i2c_startcmd(dd, (eeprom_offset << 1) | WRITE_CMD)) {
+ ipath_dbg("Failed to start cmd offset %u\n",
+ eeprom_offset);
+ goto failed_write;
+ }
+
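+ /*
+ * Write at most 4 data bytes per start/stop cycle (presumably the
+ * device's write page size), then poll below until the internal
+ * write finishes before sending the next chunk.
+ */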
+ sub_len = min(len, 4);
+ eeprom_offset += sub_len;
+ len -= sub_len;
+
+ for (i = 0; i < sub_len; i++) {
+ if (wr_byte(dd, *bp++)) {
+ ipath_dbg("no ack after byte %u/%u (%u "
+ "total remain)\n", i, sub_len,
+ len + sub_len - i);
+ goto failed_write;
+ }
+ }
+
+ stop_cmd(dd);
+
+ /*
+ * Wait for the write to complete by waiting for a successful
+ * read (the chip replies with a zero after the write
+ * cmd completes and before it writes to the eeprom;
+ * the startcmd for the read will fail the ack until
+ * the writes have completed). We do this inline to avoid
+ * the debug prints that are in the real read routine
+ * if the startcmd fails.
+ */
+ max_wait_time = 100;
+ while (i2c_startcmd(dd, READ_CMD)) {
+ stop_cmd(dd);
+ if (!--max_wait_time) {
+ ipath_dbg("Did not get successful read to "
+ "complete write\n");
+ goto failed_write;
+ }
+ }
+ /* now read the zero byte */
+ for (i = single_byte = 0; i < 8; i++) {
+ u8 bit;
+ scl_out(dd, i2c_line_high);
+ bit = sda_in(dd, 0);
+ scl_out(dd, i2c_line_low);
+ single_byte <<= 1;
+ single_byte |= bit;
+ }
+ stop_cmd(dd);
+ }
+
+ ret = 0;
+ goto bail;
+
+failed_write:
+ stop_cmd(dd);
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+static u8 flash_csum(struct ipath_flash *ifp, int adjust)
+{
+ u8 *ip = (u8 *) ifp;
+ u8 csum = 0, len;
+
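+ /*
+ * Sum all bytes of the structure, back out the stored checksum's
+ * own contribution, and return the one's complement; with adjust
+ * set, also store the result back into if_csum.
+ */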
+ for (len = 0; len < ifp->if_length; len++)
+ csum += *ip++;
+ csum -= ifp->if_csum;
+ csum = ~csum;
+ if (adjust)
+ ifp->if_csum = csum;
+
+ return csum;
+}
+
+/**
+ * ipath_get_guid - get the GUID from the i2c device
+ * @dd: the infinipath device
+ *
+ * When we add the multi-chip support, we will probably have to add
+ * the ability to use the number of guids field, and get the guid from
+ * the first chip's flash, to use for all of them.
+ */
+void ipath_get_guid(struct ipath_devdata *dd)
+{
+ void *buf;
+ struct ipath_flash *ifp;
+ __be64 guid;
+ int len;
+ u8 csum, *bguid;
+ int t = dd->ipath_unit;
+ struct ipath_devdata *dd0 = ipath_lookup(0);
+
+ if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) {
+ u8 *bguid, oguid;
+ dd->ipath_guid = dd0->ipath_guid;
+ bguid = (u8 *) & dd->ipath_guid;
+
+ oguid = bguid[7];
+ bguid[7] += t;
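+ /*
+ * If adding the unit number wrapped the low octet, propagate the
+ * carry into bytes 6 and 5, but give up rather than carry into
+ * the OUI (top three bytes).
+ */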
+ if (oguid > bguid[7]) {
+ if (bguid[6] == 0xff) {
+ if (bguid[5] == 0xff) {
+ ipath_dev_err(
+ dd,
+ "Can't set %s GUID from "
+ "base, wraps to OUI!\n",
+ ipath_get_unit_name(t));
+ dd->ipath_guid = 0;
+ goto bail;
+ }
+ bguid[5]++;
+ }
+ bguid[6]++;
+ }
+ dd->ipath_nguid = 1;
+
+ ipath_dbg("nguid %u, so adding %u to device 0 guid, "
+ "for %llx\n",
+ dd0->ipath_nguid, t,
+ (unsigned long long) be64_to_cpu(dd->ipath_guid));
+ goto bail;
+ }
+
+ len = offsetof(struct ipath_flash, if_future);
+ buf = vmalloc(len);
+ if (!buf) {
+ ipath_dev_err(dd, "Couldn't allocate memory to read %u "
+ "bytes from eeprom for GUID\n", len);
+ goto bail;
+ }
+
+ if (ipath_eeprom_read(dd, 0, buf, len)) {
+ ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
+ goto done;
+ }
+ ifp = (struct ipath_flash *)buf;
+
+ csum = flash_csum(ifp, 0);
+ if (csum != ifp->if_csum) {
+ dev_info(&dd->pcidev->dev, "Bad I2C flash checksum: "
+ "0x%x, not 0x%x\n", csum, ifp->if_csum);
+ goto done;
+ }
+ if (*(__be64 *) ifp->if_guid == 0ULL ||
+ *(__be64 *) ifp->if_guid == __constant_cpu_to_be64(-1LL)) {
+ ipath_dev_err(dd, "Invalid GUID %llx from flash; "
+ "ignoring\n",
+ *(unsigned long long *) ifp->if_guid);
+ /* don't allow GUID if all 0 or all 1's */
+ goto done;
+ }
+
+ /* complain, but allow it */
+ if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
+ dev_info(&dd->pcidev->dev, "Warning, GUID %llx is "
+ "default, probably not correct!\n",
+ *(unsigned long long *) ifp->if_guid);
+
+ bguid = ifp->if_guid;
+ if (!bguid[0] && !bguid[1] && !bguid[2]) {
+ /* original incorrect GUID format in flash; fix in
+ * core copy, by shifting up 2 octets; don't need to
+ * change the top octet, since both it and the shifted one are
+ * 0. */
+ bguid[1] = bguid[3];
+ bguid[2] = bguid[4];
+ bguid[3] = bguid[4] = 0;
+ guid = *(__be64 *) ifp->if_guid;
+ ipath_cdbg(VERBOSE, "Old GUID format in flash, top 3 zero, "
+ "shifting 2 octets\n");
+ } else
+ guid = *(__be64 *) ifp->if_guid;
+ dd->ipath_guid = guid;
+ dd->ipath_nguid = ifp->if_numguid;
+ memcpy(dd->ipath_serial, ifp->if_serial,
+ sizeof(ifp->if_serial));
+ ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
+ (unsigned long long) be64_to_cpu(dd->ipath_guid));
+
+done:
+ vfree(buf);
+
+bail:;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
new file mode 100644
index 0000000000000..c347191f02bf6
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -0,0 +1,1910 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/swap.h>
+#include <linux/vmalloc.h>
+#include <asm/pgtable.h>
+
+#include "ipath_kernel.h"
+#include "ips_common.h"
+#include "ipath_layer.h"
+
+static int ipath_open(struct inode *, struct file *);
+static int ipath_close(struct inode *, struct file *);
+static ssize_t ipath_write(struct file *, const char __user *, size_t,
+ loff_t *);
+static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
+static int ipath_mmap(struct file *, struct vm_area_struct *);
+
+static struct file_operations ipath_file_ops = {
+ .owner = THIS_MODULE,
+ .write = ipath_write,
+ .open = ipath_open,
+ .release = ipath_close,
+ .poll = ipath_poll,
+ .mmap = ipath_mmap
+};
+
+static int ipath_get_base_info(struct ipath_portdata *pd,
+ void __user *ubase, size_t ubase_size)
+{
+ int ret = 0;
+ struct ipath_base_info *kinfo = NULL;
+ struct ipath_devdata *dd = pd->port_dd;
+
+ if (ubase_size < sizeof(*kinfo)) {
+ ipath_cdbg(PROC,
+ "Base size %lu, need %lu (version mismatch?)\n",
+ (unsigned long) ubase_size,
+ (unsigned long) sizeof(*kinfo));
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
+ if (kinfo == NULL) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ ret = dd->ipath_f_get_base_info(pd, kinfo);
+ if (ret < 0)
+ goto bail;
+
+ kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
+ kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
+ kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
+ kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
+ /*
+ * have to mmap whole thing
+ */
+ kinfo->spi_rcv_egrbuftotlen =
+ pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
+ kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
+ kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
+ pd->port_rcvegrbuf_chunks;
+ kinfo->spi_tidcnt = dd->ipath_rcvtidcnt;
+ /*
+ * for this use, may be ipath_cfgports summed over all chips that
+ * are configured and present
+ */
+ kinfo->spi_nports = dd->ipath_cfgports;
+ /* unit (chip/board) our port is on */
+ kinfo->spi_unit = dd->ipath_unit;
+ /* for now, only a single page */
+ kinfo->spi_tid_maxsize = PAGE_SIZE;
+
+ /*
+ * We do this per port, based on the skip value, etc. This has
+ * to be the actual buffer size, since the protocol code treats it
+ * as an array.
+ *
+ * These have to be set to user addresses in the user code via mmap.
+ * These values are used on return to user code for the mmap target
+ * addresses only. For 32 bit, same 44 bit address problem, so use
+ * the physical address, not virtual. Before 2.6.11, using the
+ * page_address() macro worked, but in 2.6.11, even that returns the
+ * full 64 bit address (upper bits all 1's). So far, using the
+ * physical addresses (or chip offsets, for chip mapping) works, but
+ * no doubt some future kernel release will change that, and we'll be
+ * on to yet another method of dealing with this
+ */
+ kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
+ kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
+ kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
+ kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
+ (void *) dd->ipath_statusp -
+ (void *) dd->ipath_pioavailregs_dma;
+ kinfo->spi_piobufbase = (u64) pd->port_piobufs;
+ kinfo->__spi_uregbase =
+ dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
+
+ kinfo->spi_pioindex = dd->ipath_pbufsport * (pd->port_port - 1);
+ kinfo->spi_piocnt = dd->ipath_pbufsport;
+ kinfo->spi_pioalign = dd->ipath_palign;
+
+ kinfo->spi_qpair = IPATH_KD_QP;
+ kinfo->spi_piosize = dd->ipath_ibmaxlen;
+ kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */
+ kinfo->spi_port = pd->port_port;
+ kinfo->spi_sw_version = IPATH_USER_SWVERSION;
+ kinfo->spi_hw_version = dd->ipath_revision;
+
+ if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
+ ret = -EFAULT;
+
+bail:
+ kfree(kinfo);
+ return ret;
+}
+
+/**
+ * ipath_tid_update - update a port TID
+ * @pd: the port
+ * @ti: the TID information
+ *
+ * The new implementation as of Oct 2004 is that the driver assigns
+ * the tid and returns it to the caller. To make it easier to
+ * catch bugs, and to reduce search time, we keep a cursor for
+ * each port, walking the shadow tid array to find one that's not
+ * in use.
+ *
+ * For now, if we can't allocate the full list, we fail, although
+ * in the long run, we'll allocate as many as we can, and the
+ * caller will deal with that by trying the remaining pages later.
+ * That means that when we fail, we have to mark the tids as not in
+ * use again, in our shadow copy.
+ *
+ * It's up to the caller to free the tids when they are done.
+ * We'll unlock the pages as they free them.
+ *
+ * Also, right now we are locking one page at a time, but since
+ * the intended use of this routine is for a single group of
+ * virtually contiguous pages, that should change to improve
+ * performance.
+ */
+static int ipath_tid_update(struct ipath_portdata *pd,
+ const struct ipath_tid_info *ti)
+{
+ int ret = 0, ntids;
+ u32 tid, porttid, cnt, i, tidcnt;
+ u16 *tidlist;
+ struct ipath_devdata *dd = pd->port_dd;
+ u64 physaddr;
+ unsigned long vaddr;
+ u64 __iomem *tidbase;
+ unsigned long tidmap[8];
+ struct page **pagep = NULL;
+
+ if (!dd->ipath_pageshadow) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ cnt = ti->tidcnt;
+ if (!cnt) {
+ ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
+ (unsigned long long) ti->tidlist);
+ /*
+ * Should we treat this as success? Likely a bug
+ */
+ ret = -EFAULT;
+ goto done;
+ }
+ tidcnt = dd->ipath_rcvtidcnt;
+ if (cnt >= tidcnt) {
+ /* make sure it all fits in port_tid_pg_list */
+ dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
+ "TIDs, only trying max (%u)\n", cnt, tidcnt);
+ cnt = tidcnt;
+ }
+ pagep = (struct page **)pd->port_tid_pg_list;
+ tidlist = (u16 *) (&pagep[cnt]);
+
+ memset(tidmap, 0, sizeof(tidmap));
+ tid = pd->port_tidcursor;
+ /* before decrement; chip actual # */
+ porttid = pd->port_port * tidcnt;
+ ntids = tidcnt;
+ tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
+ dd->ipath_rcvtidbase +
+ porttid * sizeof(*tidbase));
+
+ ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
+ pd->port_port, cnt, tid, tidbase);
+
+ /* virtual address of first page in transfer */
+ vaddr = ti->tidvaddr;
+ if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
+ cnt * PAGE_SIZE)) {
+ ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
+ (void *)vaddr, cnt);
+ ret = -EFAULT;
+ goto done;
+ }
+ ret = ipath_get_user_pages(vaddr, cnt, pagep);
+ if (ret) {
+ if (ret == -EBUSY) {
+ ipath_dbg("Failed to lock addr %p, %u pages "
+ "(already locked)\n",
+ (void *) vaddr, cnt);
+ /*
+ * for now, continue, and see what happens but with
+ * the new implementation, this should never happen,
+ * unless perhaps the user has mpin'ed the pages
+ * themselves (something we need to test)
+ */
+ ret = 0;
+ } else {
+ dev_info(&dd->pcidev->dev,
+ "Failed to lock addr %p, %u pages: "
+ "errno %d\n", (void *) vaddr, cnt, -ret);
+ goto done;
+ }
+ }
+ for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
+ for (; ntids--; tid++) {
+ if (tid == tidcnt)
+ tid = 0;
+ if (!dd->ipath_pageshadow[porttid + tid])
+ break;
+ }
+ if (ntids < 0) {
+ /*
+ * oops, wrapped all the way through their TIDs,
+ * and didn't have enough free; see comments at
+ * start of routine
+ */
+ ipath_dbg("Not enough free TIDs for %u pages "
+ "(index %d), failing\n", cnt, i);
+ i--; /* last tidlist[i] not filled in */
+ ret = -ENOMEM;
+ break;
+ }
+ tidlist[i] = tid;
+ ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
+ "vaddr %lx\n", i, tid, vaddr);
+ /* we "know" system pages and TID pages are same size */
+ dd->ipath_pageshadow[porttid + tid] = pagep[i];
+ /*
+ * don't need an atomic op, or its overhead
+ */
+ __set_bit(tid, tidmap);
+ physaddr = page_to_phys(pagep[i]);
+ ipath_stats.sps_pagelocks++;
+ ipath_cdbg(VERBOSE,
+ "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
+ tid, vaddr, (unsigned long long) physaddr,
+ pagep[i]);
+ dd->ipath_f_put_tid(dd, &tidbase[tid], 1, physaddr);
+ /*
+ * don't check this tid in ipath_portshadow, since we
+ * just filled it in; start with the next one.
+ */
+ tid++;
+ }
+
+ if (ret) {
+ u32 limit;
+ cleanup:
+ /* jump here if copy out of updated info failed... */
+ ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
+ -ret, i, cnt);
+ /* same code that's in ipath_free_tid() */
+ limit = sizeof(tidmap) * BITS_PER_BYTE;
+ if (limit > tidcnt)
+ /* just in case size changes in future */
+ limit = tidcnt;
+ tid = find_first_bit((const unsigned long *)tidmap, limit);
+ for (; tid < limit; tid++) {
+ if (!test_bit(tid, tidmap))
+ continue;
+ if (dd->ipath_pageshadow[porttid + tid]) {
+ ipath_cdbg(VERBOSE, "Freeing TID %u\n",
+ tid);
+ dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
+ dd->ipath_tidinvalid);
+ dd->ipath_pageshadow[porttid + tid] = NULL;
+ ipath_stats.sps_pageunlocks++;
+ }
+ }
+ ipath_release_user_pages(pagep, cnt);
+ } else {
+ /*
+ * Copy the updated array, with ipath_tid's filled in, back
+ * to user. Since we did the copy in already, this "should
+ * never fail" If it does, we have to clean up...
+ */
+ if (copy_to_user((void __user *)
+ (unsigned long) ti->tidlist,
+ tidlist, cnt * sizeof(*tidlist))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
+ tidmap, sizeof tidmap)) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ if (tid == tidcnt)
+ tid = 0;
+ pd->port_tidcursor = tid;
+ }
+
+done:
+ if (ret)
+ ipath_dbg("Failed to map %u TID pages, failing with %d\n",
+ ti->tidcnt, -ret);
+ return ret;
+}
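+
+/*
+ * Illustrative userspace sketch, not part of this driver (fd, buf, npages,
+ * mytidlist and mytidmap are assumed names): the caller hands in a
+ * virtually contiguous, page-aligned region and the driver picks the TIDs,
+ * writing them back through tidlist and tidmap, e.g.
+ *
+ *   struct ipath_cmd c = { .type = IPATH_CMD_TID_UPDATE };
+ *
+ *   c.cmd.tid_info.tidvaddr = (unsigned long) buf;
+ *   c.cmd.tid_info.tidcnt = npages;
+ *   c.cmd.tid_info.tidlist = (unsigned long) mytidlist;
+ *   c.cmd.tid_info.tidmap = (unsigned long) mytidmap;
+ *   write(fd, &c, sizeof(c));
+ *
+ * The caller later releases the same TIDs with IPATH_CMD_TID_FREE, passing
+ * back the tidmap and tidcnt it received here.
+ */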
+
+/**
+ * ipath_tid_free - free a port TID
+ * @pd: the port
+ * @ti: the TID info
+ *
+ * Right now we are unlocking one page at a time, but since
+ * the intended use of this routine is for a single group of
+ * virtually contiguous pages, that should change to improve
+ * performance. We check that the TID is in range for this port
+ * but otherwise don't check validity; if user has an error and
+ * frees the wrong tid, it's only their own data that can thereby
+ * be corrupted. We do check that the TID was in use, for sanity.
+ * We always use our idea of the saved address, not the address that
+ * they pass in to us.
+ */
+
+static int ipath_tid_free(struct ipath_portdata *pd,
+ const struct ipath_tid_info *ti)
+{
+ int ret = 0;
+ u32 tid, porttid, cnt, limit, tidcnt;
+ struct ipath_devdata *dd = pd->port_dd;
+ u64 __iomem *tidbase;
+ unsigned long tidmap[8];
+
+ if (!dd->ipath_pageshadow) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
+ sizeof tidmap)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ porttid = pd->port_port * dd->ipath_rcvtidcnt;
+ tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
+ dd->ipath_rcvtidbase +
+ porttid * sizeof(*tidbase));
+
+ tidcnt = dd->ipath_rcvtidcnt;
+ limit = sizeof(tidmap) * BITS_PER_BYTE;
+ if (limit > tidcnt)
+ /* just in case size changes in future */
+ limit = tidcnt;
+ tid = find_first_bit(tidmap, limit);
+ ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
+ "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
+ limit, tid, porttid);
+ for (cnt = 0; tid < limit; tid++) {
+ /*
+ * small optimization; if we detect a run of 3 or so without
+ * any set, use find_first_bit again. That's mainly to
+ * accelerate the case where we wrapped, so we have some at
+ * the beginning, and some at the end, and a big gap
+ * in the middle.
+ */
+ if (!test_bit(tid, tidmap))
+ continue;
+ cnt++;
+ if (dd->ipath_pageshadow[porttid + tid]) {
+ ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
+ pd->port_pid, tid);
+ dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
+ dd->ipath_tidinvalid);
+ ipath_release_user_pages(
+ &dd->ipath_pageshadow[porttid + tid], 1);
+ dd->ipath_pageshadow[porttid + tid] = NULL;
+ ipath_stats.sps_pageunlocks++;
+ } else
+ ipath_dbg("Unused tid %u, ignoring\n", tid);
+ }
+ if (cnt != ti->tidcnt)
+ ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
+ ti->tidcnt, cnt);
+done:
+ if (ret)
+ ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
+ ti->tidcnt, -ret);
+ return ret;
+}
+
+/**
+ * ipath_set_part_key - set a partition key
+ * @pd: the port
+ * @key: the key
+ *
+ * We can have up to 4 active at a time (other than the default, which is
+ * always allowed). This is somewhat tricky, since multiple ports may set
+ * the same key, so we reference count them, and clean up at exit. All 4
+ * partition keys are packed into a single infinipath register. It's an
+ * error for a process to set the same pkey multiple times. We provide no
+ * mechanism to de-allocate a pkey at this time; we may eventually need to
+ * do that. I've used atomic operations and no locking, and only make
+ * a single pass through what's available. This should be more than
+ * adequate for some time. I'll think about spinlocks or the like if and as
+ * it's necessary.
+ */
+static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
+{
+ struct ipath_devdata *dd = pd->port_dd;
+ int i, any = 0, pidx = -1;
+ u16 lkey = key & 0x7FFF;
+ int ret;
+
+ if (lkey == (IPS_DEFAULT_P_KEY & 0x7FFF)) {
+ /* nothing to do; this key always valid */
+ ret = 0;
+ goto bail;
+ }
+
+ ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
+ "%hx:%x %hx:%x %hx:%x %hx:%x\n",
+ pd->port_port, key, dd->ipath_pkeys[0],
+ atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
+ atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
+ atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
+ atomic_read(&dd->ipath_pkeyrefs[3]));
+
+ if (!lkey) {
+ ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
+ pd->port_port);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Set the full membership bit, because it has to be
+ * set in the register or the packet, and it seems
+ * cleaner to set in the register than to force all
+ * callers to set it. (see bug 4331)
+ */
+ key |= 0x8000;
+
+ for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
+ if (!pd->port_pkeys[i] && pidx == -1)
+ pidx = i;
+ if (pd->port_pkeys[i] == key) {
+ ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
+ "(%x) more than once\n",
+ pd->port_port, key);
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (pidx == -1) {
+ ipath_dbg("All pkeys for port %u already in use, "
+ "can't set %x\n", pd->port_port, key);
+ ret = -EBUSY;
+ goto bail;
+ }
+ for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
+ if (!dd->ipath_pkeys[i]) {
+ any++;
+ continue;
+ }
+ if (dd->ipath_pkeys[i] == key) {
+ atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];
+
+ if (atomic_inc_return(pkrefs) > 1) {
+ pd->port_pkeys[pidx] = key;
+ ipath_cdbg(VERBOSE, "p%u set key %x "
+ "matches #%d, count now %d\n",
+ pd->port_port, key, i,
+ atomic_read(pkrefs));
+ ret = 0;
+ goto bail;
+ } else {
+ /*
+ * lost race, decrement count, catch below
+ */
+ atomic_dec(pkrefs);
+ ipath_cdbg(VERBOSE, "Lost race, count was "
+ "0, after dec, it's %d\n",
+ atomic_read(pkrefs));
+ any++;
+ }
+ }
+ if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
+ /*
+ * It makes no sense to have both the limited and
+ * full membership PKEY set at the same time since
+ * the unlimited one will disable the limited one.
+ */
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (!any) {
+ ipath_dbg("port %u, all pkeys already in use, "
+ "can't set %x\n", pd->port_port, key);
+ ret = -EBUSY;
+ goto bail;
+ }
+ for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
+ if (!dd->ipath_pkeys[i] &&
+ atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
+ u64 pkey;
+
+ /* for ipathstats, etc. */
+ ipath_stats.sps_pkeys[i] = lkey;
+ pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
+ pkey =
+ (u64) dd->ipath_pkeys[0] |
+ ((u64) dd->ipath_pkeys[1] << 16) |
+ ((u64) dd->ipath_pkeys[2] << 32) |
+ ((u64) dd->ipath_pkeys[3] << 48);
+ ipath_cdbg(PROC, "p%u set key %x in #%d, "
+ "portidx %d, new pkey reg %llx\n",
+ pd->port_port, key, i, pidx,
+ (unsigned long long) pkey);
+ ipath_write_kreg(
+ dd, dd->ipath_kregs->kr_partitionkey, pkey);
+
+ ret = 0;
+ goto bail;
+ }
+ }
+ ipath_dbg("port %u, all pkeys already in use 2nd pass, "
+ "can't set %x\n", pd->port_port, key);
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_manage_rcvq - manage a port's receive queue
+ * @pd: the port
+ * @start_stop: action to carry out
+ *
+ * start_stop == 0 disables receive on the port, for use in queue
+ * overflow conditions. start_stop == 1 re-enables, to be used to
+ * re-init the software copy of the head register.
+ */
+static int ipath_manage_rcvq(struct ipath_portdata *pd, int start_stop)
+{
+ struct ipath_devdata *dd = pd->port_dd;
+ u64 tval;
+
+ ipath_cdbg(PROC, "%sabling rcv for unit %u port %u\n",
+ start_stop ? "en" : "dis", dd->ipath_unit,
+ pd->port_port);
+ /* atomically clear receive enable port. */
+ if (start_stop) {
+ /*
+ * On enable, force in-memory copy of the tail register to
+ * 0, so that protocol code doesn't have to worry about
+ * whether or not the chip has yet updated the in-memory
+ * copy or not on return from the system call. The chip
+ * always resets its tail register back to 0 on a
+ * transition from disabled to enabled. This could cause a
+ * problem if software was broken, and did the enable w/o
+ * the disable, but eventually the in-memory copy will be
+ * updated and correct itself, even in the face of software
+ * bugs.
+ */
+ *pd->port_rcvhdrtail_kvaddr = 0;
+ set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
+ &dd->ipath_rcvctrl);
+ } else
+ clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
+ &dd->ipath_rcvctrl);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ dd->ipath_rcvctrl);
+ /* now be sure chip saw it before we return */
+ tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ if (start_stop) {
+ /*
+ * And try to be sure that tail reg update has happened too.
+ * This should in theory interlock with the RXE changes to
+ * the tail register. Don't assign it to the in-memory copy
+ * of the tail register, since we could overwrite an update by the
+ * chip if we did.
+ */
+ tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
+ }
+ /* always; new head should be equal to new tail; see above */
+ return 0;
+}
+
+static void ipath_clean_part_key(struct ipath_portdata *pd,
+ struct ipath_devdata *dd)
+{
+ int i, j, pchanged = 0;
+ u64 oldpkey;
+
+ /* for debugging only */
+ oldpkey = (u64) dd->ipath_pkeys[0] |
+ ((u64) dd->ipath_pkeys[1] << 16) |
+ ((u64) dd->ipath_pkeys[2] << 32) |
+ ((u64) dd->ipath_pkeys[3] << 48);
+
+ for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
+ if (!pd->port_pkeys[i])
+ continue;
+ ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
+ pd->port_pkeys[i]);
+ for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
+ /* check for match independent of the global bit */
+ if ((dd->ipath_pkeys[j] & 0x7fff) !=
+ (pd->port_pkeys[i] & 0x7fff))
+ continue;
+ if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
+ ipath_cdbg(VERBOSE, "p%u clear key "
+ "%x matches #%d\n",
+ pd->port_port,
+ pd->port_pkeys[i], j);
+ ipath_stats.sps_pkeys[j] =
+ dd->ipath_pkeys[j] = 0;
+ pchanged++;
+ }
+ else ipath_cdbg(
+ VERBOSE, "p%u key %x matches #%d, "
+ "but ref still %d\n", pd->port_port,
+ pd->port_pkeys[i], j,
+ atomic_read(&dd->ipath_pkeyrefs[j]));
+ break;
+ }
+ pd->port_pkeys[i] = 0;
+ }
+ if (pchanged) {
+ u64 pkey = (u64) dd->ipath_pkeys[0] |
+ ((u64) dd->ipath_pkeys[1] << 16) |
+ ((u64) dd->ipath_pkeys[2] << 32) |
+ ((u64) dd->ipath_pkeys[3] << 48);
+ ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
+ "new pkey reg %llx\n", pd->port_port,
+ (unsigned long long) oldpkey,
+ (unsigned long long) pkey);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
+ pkey);
+ }
+}
+
+/**
+ * ipath_create_user_egr - allocate eager TID buffers
+ * @pd: the port to allocate TID buffers for
+ *
+ * This routine is now quite different for user and kernel, because
+ * the kernel uses skb's for accelerated network performance.
+ * This is the user port version.
+ *
+ * Allocate the eager TID buffers and program them into infinipath.
+ * They are no longer completely contiguous; we do multiple allocation
+ * calls.
+ */
+static int ipath_create_user_egr(struct ipath_portdata *pd)
+{
+ struct ipath_devdata *dd = pd->port_dd;
+ unsigned e, egrcnt, alloced, egrperchunk, chunk, egrsize, egroff;
+ size_t size;
+ int ret;
+
+ egrcnt = dd->ipath_rcvegrcnt;
+ /* TID number offset for this port */
+ egroff = pd->port_port * egrcnt;
+ egrsize = dd->ipath_rcvegrbufsize;
+ ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
+ "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);
+
+ /*
+ * to avoid wasting a lot of memory, we allocate 32KB chunks of
+ * physically contiguous memory, advance through it until used up
+ * and then allocate more. Of course, we need memory to store those
+ * extra pointers, now. Started out with 256KB, but under heavy
+ * memory pressure (creating large files and then copying them over
+ * NFS while doing lots of MPI jobs), we hit some allocation
+ * failures, even though we can sleep... (2.6.10) Still get
+ * failures at 64K. 32K is the lowest we can go without waiting
+ * more memory again. It seems likely that the coalescing in
+ * free_pages, etc. still has issues (as it has had previously
+ * during 2.6.x development).
+ */
+ size = 0x8000;
+ alloced = ALIGN(egrsize * egrcnt, size);
+ egrperchunk = size / egrsize;
+ chunk = (egrcnt + egrperchunk - 1) / egrperchunk;
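+ /*
+ * Illustrative arithmetic only (hypothetical numbers, not from this
+ * driver): with egrsize = 2048 and egrcnt = 512, each 32KB chunk holds
+ * egrperchunk = 32768 / 2048 = 16 buffers, so we need
+ * chunk = (512 + 16 - 1) / 16 = 32 chunks.
+ */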
+ pd->port_rcvegrbuf_chunks = chunk;
+ pd->port_rcvegrbufs_perchunk = egrperchunk;
+ pd->port_rcvegrbuf_size = size;
+ pd->port_rcvegrbuf = vmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]));
+ if (!pd->port_rcvegrbuf) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ pd->port_rcvegrbuf_phys =
+ vmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]));
+ if (!pd->port_rcvegrbuf_phys) {
+ ret = -ENOMEM;
+ goto bail_rcvegrbuf;
+ }
+ for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
+ /*
+ * GFP_USER, but without GFP_FS, so buffer cache can be
+ * coalesced (we hope); otherwise, even at order 4,
+ * heavy filesystem activity makes these fail
+ */
+ gfp_t gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+
+ pd->port_rcvegrbuf[e] = dma_alloc_coherent(
+ &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
+ gfp_flags);
+
+ if (!pd->port_rcvegrbuf[e]) {
+ ret = -ENOMEM;
+ goto bail_rcvegrbuf_phys;
+ }
+ }
+
+ pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];
+
+ for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
+ dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
+ unsigned i;
+
+ for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
+ dd->ipath_f_put_tid(dd, e + egroff +
+ (u64 __iomem *)
+ ((char __iomem *)
+ dd->ipath_kregbase +
+ dd->ipath_rcvegrbase), 0, pa);
+ pa += egrsize;
+ }
+ cond_resched(); /* don't hog the cpu */
+ }
+
+ ret = 0;
+ goto bail;
+
+bail_rcvegrbuf_phys:
+ for (e = 0; e < pd->port_rcvegrbuf_chunks &&
+ pd->port_rcvegrbuf[e]; e++)
+ dma_free_coherent(&dd->pcidev->dev, size,
+ pd->port_rcvegrbuf[e],
+ pd->port_rcvegrbuf_phys[e]);
+
+ vfree(pd->port_rcvegrbuf_phys);
+ pd->port_rcvegrbuf_phys = NULL;
+bail_rcvegrbuf:
+ vfree(pd->port_rcvegrbuf);
+ pd->port_rcvegrbuf = NULL;
+bail:
+ return ret;
+}
+
+static int ipath_do_user_init(struct ipath_portdata *pd,
+ const struct ipath_user_info *uinfo)
+{
+ int ret = 0;
+ struct ipath_devdata *dd = pd->port_dd;
+ u64 physaddr, uaddr, off, atmp;
+ struct page *pagep;
+ u32 head32;
+ u64 head;
+
+ /* for now, if major version is different, bail */
+ if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) {
+ dev_info(&dd->pcidev->dev,
+ "User major version %d not same as driver "
+ "major %d\n", uinfo->spu_userversion >> 16,
+ IPATH_USER_SWMAJOR);
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR)
+ ipath_dbg("User minor version %d not same as driver "
+ "minor %d\n", uinfo->spu_userversion & 0xffff,
+ IPATH_USER_SWMINOR);
+
+ if (uinfo->spu_rcvhdrsize) {
+ ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
+ if (ret)
+ goto done;
+ }
+
+ /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
+
+ /* set up for the rcvhdr Q tail register writeback to user memory */
+ if (!uinfo->spu_rcvhdraddr ||
+ !access_ok(VERIFY_WRITE, (u64 __user *) (unsigned long)
+ uinfo->spu_rcvhdraddr, sizeof(u64))) {
+ ipath_dbg("Port %d rcvhdrtail addr %llx not valid\n",
+ pd->port_port,
+ (unsigned long long) uinfo->spu_rcvhdraddr);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ off = offset_in_page(uinfo->spu_rcvhdraddr);
+ uaddr = PAGE_MASK & (unsigned long) uinfo->spu_rcvhdraddr;
+ ret = ipath_get_user_pages_nocopy(uaddr, &pagep);
+ if (ret) {
+ dev_info(&dd->pcidev->dev, "Failed to lookup and lock "
+ "address %llx for rcvhdrtail: errno %d\n",
+ (unsigned long long) uinfo->spu_rcvhdraddr, -ret);
+ goto done;
+ }
+ ipath_stats.sps_pagelocks++;
+ pd->port_rcvhdrtail_uaddr = uaddr;
+ pd->port_rcvhdrtail_pagep = pagep;
+ pd->port_rcvhdrtail_kvaddr =
+ page_address(pagep);
+ pd->port_rcvhdrtail_kvaddr += off;
+ physaddr = page_to_phys(pagep) + off;
+ ipath_cdbg(VERBOSE, "port %d user addr %llx hdrtailaddr, %llx "
+ "physical (off=%llx)\n",
+ pd->port_port,
+ (unsigned long long) uinfo->spu_rcvhdraddr,
+ (unsigned long long) physaddr, (unsigned long long) off);
+ ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
+ pd->port_port, physaddr);
+ atmp = ipath_read_kreg64_port(dd,
+ dd->ipath_kregs->kr_rcvhdrtailaddr,
+ pd->port_port);
+ if (physaddr != atmp) {
+ ipath_dev_err(dd,
+ "Catastrophic software error, "
+ "RcvHdrTailAddr%u written as %llx, "
+ "read back as %llx\n", pd->port_port,
+ (unsigned long long) physaddr,
+ (unsigned long long) atmp);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* for right now, kernel piobufs are at end, so port 1 is at 0 */
+ pd->port_piobufs = dd->ipath_piobufbase +
+ dd->ipath_pbufsport * (pd->port_port -
+ 1) * dd->ipath_palign;
+ ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n",
+ pd->port_port, pd->port_piobufs);
+
+ /*
+ * Now allocate the rcvhdr Q and eager TIDs; skip the TID
+ * array for the time being. If pd->port_port > chip-supported,
+ * we will someday need extra work here, handling overflow
+ * through port 0.
+ */
+ ret = ipath_create_rcvhdrq(dd, pd);
+ if (!ret)
+ ret = ipath_create_user_egr(pd);
+ if (ret)
+ goto done;
+ /* enable receives now */
+ /* atomically set enable bit for this port */
+ set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
+ &dd->ipath_rcvctrl);
+
+ /*
+ * set the head registers for this port to the current values
+ * of the tail pointers, since we don't know if they were
+ * updated on last use of the port.
+ */
+ head32 = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
+ head = (u64) head32;
+ ipath_write_ureg(dd, ur_rcvhdrhead, head, pd->port_port);
+ head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
+ ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
+ dd->ipath_lastegrheads[pd->port_port] = -1;
+ dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
+ ipath_cdbg(VERBOSE, "Wrote port%d head %llx, egrhead %x from "
+ "tail regs\n", pd->port_port,
+ (unsigned long long) head, head32);
+ pd->port_tidcursor = 0; /* start at beginning after open */
+ /*
+ * now enable the port; the tail registers will be written to memory
+ * by the chip as soon as it sees the write to
+ * dd->ipath_kregs->kr_rcvctrl. The update only happens on
+ * transition from 0 to 1, so clear it first, then set it as part of
+ * enabling the port. This will (very briefly) affect any other
+ * open ports, but it shouldn't be long enough to be an issue.
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ dd->ipath_rcvctrl);
+
+done:
+ return ret;
+}
+
+static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
+ u64 ureg)
+{
+ unsigned long phys;
+ int ret;
+
+ /* it's the real hardware, so io_remap works */
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+ dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
+ "%lx > PAGE\n", vma->vm_end - vma->vm_start);
+ ret = -EFAULT;
+ } else {
+ phys = dd->ipath_physaddr + ureg;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ phys >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
+ return ret;
+}
+
+static int mmap_piobufs(struct vm_area_struct *vma,
+ struct ipath_devdata *dd,
+ struct ipath_portdata *pd)
+{
+ unsigned long phys;
+ int ret;
+
+ /*
+ * When we map the PIO buffers, we want to map them write-only, with
+ * no reads possible.
+ */
+
+ if ((vma->vm_end - vma->vm_start) >
+ (dd->ipath_pbufsport * dd->ipath_palign)) {
+ dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
+ "reqlen %lx > PAGE\n",
+ vma->vm_end - vma->vm_start);
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ phys = dd->ipath_physaddr + pd->port_piobufs;
+ /*
+ * Do *NOT* mark this as non-cached (PWT bit), or we don't get the
+ * write combining behavior we want on the PIO buffers!
+ * vma->vm_page_prot =
+ * pgprot_noncached(vma->vm_page_prot);
+ */
+
+ if (vma->vm_flags & VM_READ) {
+ dev_info(&dd->pcidev->dev,
+ "Can't map piobufs as readable (flags=%lx)\n",
+ vma->vm_flags);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ /* don't allow them to later change to readable with mprotect */
+
+ vma->vm_flags &= ~VM_MAYWRITE;
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+
+ ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+bail:
+ return ret;
+}
+
+static int mmap_rcvegrbufs(struct vm_area_struct *vma,
+ struct ipath_portdata *pd)
+{
+ struct ipath_devdata *dd = pd->port_dd;
+ unsigned long start, size;
+ size_t total_size, i;
+ dma_addr_t *phys;
+ int ret;
+
+ if (!pd->port_rcvegrbuf) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ size = pd->port_rcvegrbuf_size;
+ total_size = pd->port_rcvegrbuf_chunks * size;
+ if ((vma->vm_end - vma->vm_start) > total_size) {
+ dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
+ "reqlen %lx > actual %lx\n",
+ vma->vm_end - vma->vm_start,
+ (unsigned long) total_size);
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ if (vma->vm_flags & VM_WRITE) {
+ dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
+ "writable (flags=%lx)\n", vma->vm_flags);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ start = vma->vm_start;
+ phys = pd->port_rcvegrbuf_phys;
+
+ /* don't allow them to later change to writeable with mprotect */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
+ ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (ret < 0)
+ goto bail;
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static int mmap_rcvhdrq(struct vm_area_struct *vma,
+ struct ipath_portdata *pd)
+{
+ struct ipath_devdata *dd = pd->port_dd;
+ size_t total_size;
+ int ret;
+
+ /*
+ * kmalloc'ed memory, physically contiguous; this is from
+ * spi_rcvhdr_base; we allow the user to map it read-write so they can
+ * write hdrq entries to allow protocol code to directly poll
+ * whether a hdrq entry has been written.
+ */
+ total_size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
+ sizeof(u32), PAGE_SIZE);
+ if ((vma->vm_end - vma->vm_start) > total_size) {
+ dev_info(&dd->pcidev->dev,
+ "FAIL on rcvhdrq: reqlen %lx > actual %lx\n",
+ vma->vm_end - vma->vm_start,
+ (unsigned long) total_size);
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pd->port_rcvhdrq_phys >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+bail:
+ return ret;
+}
+
+static int mmap_pioavailregs(struct vm_area_struct *vma,
+ struct ipath_portdata *pd)
+{
+ struct ipath_devdata *dd = pd->port_dd;
+ int ret;
+
+ /*
+ * When we map the PIO bufferavail registers, we want to map them
+ * read-only, with no writes possible.
+ *
+ * kmalloc'ed memory, physically contiguous, one page only, readonly
+ */
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+ dev_info(&dd->pcidev->dev, "FAIL on pioavailregs_dma: "
+ "reqlen %lx > actual %lx\n",
+ vma->vm_end - vma->vm_start,
+ (unsigned long) PAGE_SIZE);
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ if (vma->vm_flags & VM_WRITE) {
+ dev_info(&dd->pcidev->dev,
+ "Can't map pioavailregs as writable (flags=%lx)\n",
+ vma->vm_flags);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ /* don't allow them to later change with mprotect */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ ret = remap_pfn_range(vma, vma->vm_start,
+ dd->ipath_pioavailregs_phys >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot);
+bail:
+ return ret;
+}
+
+/**
+ * ipath_mmap - mmap various structures into user space
+ * @fp: the file pointer
+ * @vma: the VM area
+ *
+ * We use this to have a shared buffer between the kernel and the user code
+ * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
+ * buffers in the chip. We have the open and close entries so we can bump
+ * the ref count and keep the driver from being unloaded while still mapped.
+ */
+static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct ipath_portdata *pd;
+ struct ipath_devdata *dd;
+ u64 pgaddr, ureg;
+ int ret;
+
+ pd = port_fp(fp);
+ dd = pd->port_dd;
+ /*
+ * This is the ipath_do_user_init() code, mapping the shared buffers
+ * into the user process. The address referred to by vm_pgoff is the
+ * virtual, not physical, address; we only do one mmap for each
+ * space mapped.
+ */
+ pgaddr = vma->vm_pgoff << PAGE_SHIFT;
+
+ /*
+ * note that ureg does *NOT* have the kregvirt as part of it, to be
+ * sure that for 32 bit programs, we don't end up trying to map a
+ * > 44 bit address. Has to match ipath_get_base_info() code that sets
+ * __spi_uregbase
+ */
+
+ ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
+
+ ipath_cdbg(MM, "ushare: pgaddr %llx vm_start=%lx, vmlen %lx\n",
+ (unsigned long long) pgaddr, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+
+ if (pgaddr == ureg)
+ ret = mmap_ureg(vma, dd, ureg);
+ else if (pgaddr == pd->port_piobufs)
+ ret = mmap_piobufs(vma, dd, pd);
+ else if (pgaddr == (u64) pd->port_rcvegr_phys)
+ ret = mmap_rcvegrbufs(vma, pd);
+ else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
+ ret = mmap_rcvhdrq(vma, pd);
+ else if (pgaddr == dd->ipath_pioavailregs_phys)
+ ret = mmap_pioavailregs(vma, pd);
+ else
+ ret = -EINVAL;
+
+ vma->vm_private_data = NULL;
+
+ if (ret < 0)
+ dev_info(&dd->pcidev->dev,
+ "Failure %d on addr %lx, off %lx\n",
+ -ret, vma->vm_start, vma->vm_pgoff);
+
+ return ret;
+}
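+
+/*
+ * Illustrative userspace sketch, not part of this driver (fd, hdrq and
+ * hdrq_len are assumed names): after IPATH_CMD_USER_INIT, user code takes
+ * the addresses returned in struct ipath_base_info and passes them back
+ * as mmap offsets, e.g.
+ *
+ *   hdrq = mmap(NULL, hdrq_len, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *               fd, (off_t) base_info.spi_rcvhdr_base);
+ *
+ * so the pgaddr comparison in ipath_mmap() above sees the same physical
+ * or chip address that ipath_get_base_info() reported.
+ */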
+
+static unsigned int ipath_poll(struct file *fp,
+ struct poll_table_struct *pt)
+{
+ struct ipath_portdata *pd;
+ u32 head, tail;
+ int bit;
+ struct ipath_devdata *dd;
+
+ pd = port_fp(fp);
+ dd = pd->port_dd;
+
+ bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT;
+ set_bit(bit, &dd->ipath_rcvctrl);
+
+ /*
+ * Before blocking, make sure that head is still == tail,
+ * reading from the chip, so we can be sure the interrupt
+ * enable has made it to the chip. If not equal, disable
+ * interrupt again and return immediately. This avoids races,
+ * and the overhead of the chip read doesn't matter much at
+ * this point, since we are waiting for something anyway.
+ */
+
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ dd->ipath_rcvctrl);
+
+ head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
+ tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
+
+ if (tail == head) {
+ set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
+ poll_wait(fp, &pd->port_wait, pt);
+
+ if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
+ /* timed out, no packets received */
+ clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
+ pd->port_rcvwait_to++;
+ }
+ }
+ else {
+ /* it's already happened; don't do wait_event overhead */
+ pd->port_rcvnowait++;
+ }
+
+ clear_bit(bit, &dd->ipath_rcvctrl);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ dd->ipath_rcvctrl);
+
+ return 0;
+}
+
+static int try_alloc_port(struct ipath_devdata *dd, int port,
+ struct file *fp)
+{
+ int ret;
+
+ if (!dd->ipath_pd[port]) {
+ void *p, *ptmp;
+
+ p = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);
+
+ /*
+ * Allocate memory for use in ipath_tid_update() just once
+ * at open, not per call. Reduces cost of expected send
+ * setup.
+ */
+ ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
+ dd->ipath_rcvtidcnt * sizeof(struct page **),
+ GFP_KERNEL);
+ if (!p || !ptmp) {
+ ipath_dev_err(dd, "Unable to allocate portdata "
+ "memory, failing open\n");
+ ret = -ENOMEM;
+ kfree(p);
+ kfree(ptmp);
+ goto bail;
+ }
+ dd->ipath_pd[port] = p;
+ dd->ipath_pd[port]->port_port = port;
+ dd->ipath_pd[port]->port_dd = dd;
+ dd->ipath_pd[port]->port_tid_pg_list = ptmp;
+ init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
+ }
+ if (!dd->ipath_pd[port]->port_cnt) {
+ dd->ipath_pd[port]->port_cnt = 1;
+ fp->private_data = (void *) dd->ipath_pd[port];
+ ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
+ current->comm, current->pid, dd->ipath_unit,
+ port);
+ dd->ipath_pd[port]->port_pid = current->pid;
+ strncpy(dd->ipath_pd[port]->port_comm, current->comm,
+ sizeof(dd->ipath_pd[port]->port_comm));
+ ipath_stats.sps_ports++;
+ ret = 0;
+ goto bail;
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
+static inline int usable(struct ipath_devdata *dd)
+{
+ return dd &&
+ (dd->ipath_flags & IPATH_PRESENT) &&
+ dd->ipath_kregbase &&
+ dd->ipath_lid &&
+ !(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
+ | IPATH_LINKUNK));
+}
+
+static int find_free_port(int unit, struct file *fp)
+{
+ struct ipath_devdata *dd = ipath_lookup(unit);
+ int ret, i;
+
+ if (!dd) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ if (!usable(dd)) {
+ ret = -ENETDOWN;
+ goto bail;
+ }
+
+ for (i = 0; i < dd->ipath_cfgports; i++) {
+ ret = try_alloc_port(dd, i, fp);
+ if (ret != -EBUSY)
+ goto bail;
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
+static int find_best_unit(struct file *fp)
+{
+ int ret = 0, i, prefunit = -1, devmax;
+ int maxofallports, npresent, nup;
+ int ndev;
+
+ (void) ipath_count_units(&npresent, &nup, &maxofallports);
+
+ /*
+ * This code is present to allow a knowledgeable person to
+ * specify the layout of processes to processors before opening
+ * this driver, and then we'll assign the process to the "closest"
+ * HT-400 to that processor (we assume reasonable connectivity,
+ * for now). This code assumes that if affinity has been set
+ * before this point, that at most one cpu is set; for now this
+ * is reasonable. I check for both cpus_empty() and cpus_full(),
+ * in case some kernel variant sets none of the bits when no
+ * affinity is set. 2.6.11 and 12 kernels have all present
+ * cpus set. Some day we'll have to fix it up further to handle
+ * a cpu subset. This algorithm fails for two HT-400's connected
+ * in tunnel fashion. Eventually this needs real topology
+ * information. There may be some issues with dual core numbering
+ * as well. This needs more work prior to release.
+ */
+ if (!cpus_empty(current->cpus_allowed) &&
+ !cpus_full(current->cpus_allowed)) {
+ int ncpus = num_online_cpus(), curcpu = -1;
+ for (i = 0; i < ncpus; i++)
+ if (cpu_isset(i, current->cpus_allowed)) {
+ ipath_cdbg(PROC, "%s[%u] affinity set for "
+ "cpu %d\n", current->comm,
+ current->pid, i);
+ curcpu = i;
+ }
+ if (curcpu != -1) {
+ if (npresent) {
+ prefunit = curcpu / (ncpus / npresent);
+ ipath_dbg("%s[%u] %d chips, %d cpus, "
+ "%d cpus/chip, select unit %d\n",
+ current->comm, current->pid,
+ npresent, ncpus, ncpus / npresent,
+ prefunit);
+ }
+ }
+ }
+
+ /*
+ * user ports start at 1, kernel port is 0
+ * For now, we do round-robin access across all chips
+ */
+
+ if (prefunit != -1)
+ devmax = prefunit + 1;
+ else
+ devmax = ipath_count_units(NULL, NULL, NULL);
+recheck:
+ for (i = 1; i < maxofallports; i++) {
+ for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
+ ndev++) {
+ struct ipath_devdata *dd = ipath_lookup(ndev);
+
+ if (!usable(dd))
+ continue; /* can't use this unit */
+ if (i >= dd->ipath_cfgports)
+ /*
+ * Maxed out on users of this unit. Try
+ * next.
+ */
+ continue;
+ ret = try_alloc_port(dd, i, fp);
+ if (!ret)
+ goto done;
+ }
+ }
+
+ if (npresent) {
+ if (nup == 0) {
+ ret = -ENETDOWN;
+ ipath_dbg("No ports available (none initialized "
+ "and ready)\n");
+ } else {
+ if (prefunit > 0) {
+ /* if started above 0, retry from 0 */
+ ipath_cdbg(PROC,
+ "%s[%u] no ports on prefunit "
+ "%d, clear and re-check\n",
+ current->comm, current->pid,
+ prefunit);
+ devmax = ipath_count_units(NULL, NULL,
+ NULL);
+ prefunit = -1;
+ goto recheck;
+ }
+ ret = -EBUSY;
+ ipath_dbg("No ports available\n");
+ }
+ } else {
+ ret = -ENXIO;
+ ipath_dbg("No boards found\n");
+ }
+
+done:
+ return ret;
+}
+
+static int ipath_open(struct inode *in, struct file *fp)
+{
+ int ret, minor;
+
+ mutex_lock(&ipath_mutex);
+
+ minor = iminor(in);
+ ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
+ (long)in->i_rdev, minor);
+
+ if (minor)
+ ret = find_free_port(minor - 1, fp);
+ else
+ ret = find_best_unit(fp);
+
+ mutex_unlock(&ipath_mutex);
+ return ret;
+}
+
+/**
+ * unlock_expected_tids - unlock any expected TID entries the port still had in use
+ * @pd: port
+ *
+ * We don't actually update the chip here, because we do a bulk update
+ * below, using ipath_f_clear_tids.
+ */
+static void unlock_expected_tids(struct ipath_portdata *pd)
+{
+ struct ipath_devdata *dd = pd->port_dd;
+ int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
+ int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;
+
+ ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
+ pd->port_port);
+ for (i = port_tidbase; i < maxtid; i++) {
+ if (!dd->ipath_pageshadow[i])
+ continue;
+
+ ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i],
+ 1);
+ dd->ipath_pageshadow[i] = NULL;
+ cnt++;
+ ipath_stats.sps_pageunlocks++;
+ }
+ if (cnt)
+ ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
+ pd->port_port, cnt);
+
+ if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
+ ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
+ (unsigned long long) ipath_stats.sps_pagelocks,
+ (unsigned long long)
+ ipath_stats.sps_pageunlocks);
+}
+
+static int ipath_close(struct inode *in, struct file *fp)
+{
+ int ret = 0;
+ struct ipath_portdata *pd;
+ struct ipath_devdata *dd;
+ unsigned port;
+
+ ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
+ (long)in->i_rdev, fp->private_data);
+
+ mutex_lock(&ipath_mutex);
+
+ pd = port_fp(fp);
+ port = pd->port_port;
+ fp->private_data = NULL;
+ dd = pd->port_dd;
+
+ if (pd->port_hdrqfull) {
+ ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
+ "during run\n", pd->port_comm, pd->port_pid,
+ pd->port_hdrqfull);
+ pd->port_hdrqfull = 0;
+ }
+
+ if (pd->port_rcvwait_to || pd->port_piowait_to
+ || pd->port_rcvnowait || pd->port_pionowait) {
+ ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
+ "%u rcv %u, pio already\n",
+ pd->port_port, pd->port_rcvwait_to,
+ pd->port_piowait_to, pd->port_rcvnowait,
+ pd->port_pionowait);
+ pd->port_rcvwait_to = pd->port_piowait_to =
+ pd->port_rcvnowait = pd->port_pionowait = 0;
+ }
+ if (pd->port_flag) {
+ ipath_dbg("port %u port_flag still set to 0x%lx\n",
+ pd->port_port, pd->port_flag);
+ pd->port_flag = 0;
+ }
+
+ if (dd->ipath_kregbase) {
+ if (pd->port_rcvhdrtail_uaddr) {
+ pd->port_rcvhdrtail_uaddr = 0;
+ pd->port_rcvhdrtail_kvaddr = NULL;
+ ipath_release_user_pages_on_close(
+ &pd->port_rcvhdrtail_pagep, 1);
+ pd->port_rcvhdrtail_pagep = NULL;
+ ipath_stats.sps_pageunlocks++;
+ }
+ ipath_write_kreg_port(
+ dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
+ port, 0ULL);
+ ipath_write_kreg_port(
+ dd, dd->ipath_kregs->kr_rcvhdraddr,
+ pd->port_port, 0);
+
+ /* clean up the pkeys for this port user */
+ ipath_clean_part_key(pd, dd);
+
+ if (port < dd->ipath_cfgports) {
+ int i = dd->ipath_pbufsport * (port - 1);
+ ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
+
+ /* atomically clear receive enable port. */
+ clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
+ &dd->ipath_rcvctrl);
+ ipath_write_kreg(
+ dd,
+ dd->ipath_kregs->kr_rcvctrl,
+ dd->ipath_rcvctrl);
+
+ if (dd->ipath_pageshadow)
+ unlock_expected_tids(pd);
+ ipath_stats.sps_ports--;
+ ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
+ pd->port_comm, pd->port_pid,
+ dd->ipath_unit, port);
+ }
+ }
+
+ pd->port_cnt = 0;
+ pd->port_pid = 0;
+
+ dd->ipath_f_clear_tids(dd, pd->port_port);
+
+ ipath_free_pddata(dd, pd->port_port, 0);
+
+ mutex_unlock(&ipath_mutex);
+
+ return ret;
+}
+
+static int ipath_port_info(struct ipath_portdata *pd,
+ struct ipath_port_info __user *uinfo)
+{
+ struct ipath_port_info info;
+ int nup;
+ int ret;
+
+ (void) ipath_count_units(NULL, &nup, NULL);
+ info.num_active = nup;
+ info.unit = pd->port_dd->ipath_unit;
+ info.port = pd->port_port;
+
+ if (copy_to_user(uinfo, &info, sizeof(info))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static ssize_t ipath_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off)
+{
+ const struct ipath_cmd __user *ucmd;
+ struct ipath_portdata *pd;
+ const void __user *src;
+ size_t consumed, copy;
+ struct ipath_cmd cmd;
+ ssize_t ret = 0;
+ void *dest;
+
+ if (count < sizeof(cmd.type)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ucmd = (const struct ipath_cmd __user *) data;
+
+ if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ consumed = sizeof(cmd.type);
+
+ switch (cmd.type) {
+ case IPATH_CMD_USER_INIT:
+ copy = sizeof(cmd.cmd.user_info);
+ dest = &cmd.cmd.user_info;
+ src = &ucmd->cmd.user_info;
+ break;
+ case IPATH_CMD_RECV_CTRL:
+ copy = sizeof(cmd.cmd.recv_ctrl);
+ dest = &cmd.cmd.recv_ctrl;
+ src = &ucmd->cmd.recv_ctrl;
+ break;
+ case IPATH_CMD_PORT_INFO:
+ copy = sizeof(cmd.cmd.port_info);
+ dest = &cmd.cmd.port_info;
+ src = &ucmd->cmd.port_info;
+ break;
+ case IPATH_CMD_TID_UPDATE:
+ case IPATH_CMD_TID_FREE:
+ copy = sizeof(cmd.cmd.tid_info);
+ dest = &cmd.cmd.tid_info;
+ src = &ucmd->cmd.tid_info;
+ break;
+ case IPATH_CMD_SET_PART_KEY:
+ copy = sizeof(cmd.cmd.part_key);
+ dest = &cmd.cmd.part_key;
+ src = &ucmd->cmd.part_key;
+ break;
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if ((count - consumed) < copy) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (copy_from_user(dest, src, copy)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ consumed += copy;
+ pd = port_fp(fp);
+
+ switch (cmd.type) {
+ case IPATH_CMD_USER_INIT:
+ ret = ipath_do_user_init(pd, &cmd.cmd.user_info);
+ if (ret < 0)
+ goto bail;
+ ret = ipath_get_base_info(
+ pd, (void __user *) (unsigned long)
+ cmd.cmd.user_info.spu_base_info,
+ cmd.cmd.user_info.spu_base_info_size);
+ break;
+ case IPATH_CMD_RECV_CTRL:
+ ret = ipath_manage_rcvq(pd, cmd.cmd.recv_ctrl);
+ break;
+ case IPATH_CMD_PORT_INFO:
+ ret = ipath_port_info(pd,
+ (struct ipath_port_info __user *)
+ (unsigned long) cmd.cmd.port_info);
+ break;
+ case IPATH_CMD_TID_UPDATE:
+ ret = ipath_tid_update(pd, &cmd.cmd.tid_info);
+ break;
+ case IPATH_CMD_TID_FREE:
+ ret = ipath_tid_free(pd, &cmd.cmd.tid_info);
+ break;
+ case IPATH_CMD_SET_PART_KEY:
+ ret = ipath_set_part_key(pd, cmd.cmd.part_key);
+ break;
+ }
+
+ if (ret >= 0)
+ ret = consumed;
+
+bail:
+ return ret;
+}
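+
+/*
+ * Illustrative userspace sketch, not part of this driver (fd and pkey are
+ * assumed names): commands are issued by write()ing a struct ipath_cmd
+ * whose type field selects which member of the cmd union the driver
+ * copies in, e.g.
+ *
+ *   struct ipath_cmd c = { .type = IPATH_CMD_SET_PART_KEY };
+ *
+ *   c.cmd.part_key = pkey;
+ *   write(fd, &c, sizeof(c));
+ *
+ * On success, write() returns the number of bytes the driver consumed
+ * (the type plus the command-specific payload), as computed above.
+ */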
+
+static struct class *ipath_class;
+
+static int init_cdev(int minor, char *name, struct file_operations *fops,
+ struct cdev **cdevp, struct class_device **class_devp)
+{
+ const dev_t dev = MKDEV(IPATH_MAJOR, minor);
+ struct cdev *cdev = NULL;
+ struct class_device *class_dev = NULL;
+ int ret;
+
+ cdev = cdev_alloc();
+ if (!cdev) {
+ printk(KERN_ERR IPATH_DRV_NAME
+ ": Could not allocate cdev for minor %d, %s\n",
+ minor, name);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ cdev->owner = THIS_MODULE;
+ cdev->ops = fops;
+ kobject_set_name(&cdev->kobj, name);
+
+ ret = cdev_add(cdev, dev, 1);
+ if (ret < 0) {
+ printk(KERN_ERR IPATH_DRV_NAME
+ ": Could not add cdev for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+ goto err_cdev;
+ }
+
+ class_dev = class_device_create(ipath_class, NULL, dev, NULL, name);
+
+ if (IS_ERR(class_dev)) {
+ ret = PTR_ERR(class_dev);
+ printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
+ "class_dev for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+ goto err_cdev;
+ }
+
+ goto done;
+
+err_cdev:
+ cdev_del(cdev);
+ cdev = NULL;
+
+done:
+ if (ret >= 0) {
+ *cdevp = cdev;
+ *class_devp = class_dev;
+ } else {
+ *cdevp = NULL;
+ *class_devp = NULL;
+ }
+
+ return ret;
+}
+
+int ipath_cdev_init(int minor, char *name, struct file_operations *fops,
+ struct cdev **cdevp, struct class_device **class_devp)
+{
+ return init_cdev(minor, name, fops, cdevp, class_devp);
+}
+
+static void cleanup_cdev(struct cdev **cdevp,
+ struct class_device **class_devp)
+{
+ struct class_device *class_dev = *class_devp;
+
+ if (class_dev) {
+ class_device_unregister(class_dev);
+ *class_devp = NULL;
+ }
+
+ if (*cdevp) {
+ cdev_del(*cdevp);
+ *cdevp = NULL;
+ }
+}
+
+void ipath_cdev_cleanup(struct cdev **cdevp,
+ struct class_device **class_devp)
+{
+ cleanup_cdev(cdevp, class_devp);
+}
+
+static struct cdev *wildcard_cdev;
+static struct class_device *wildcard_class_dev;
+
+static const dev_t dev = MKDEV(IPATH_MAJOR, 0);
+
+static int user_init(void)
+{
+ int ret;
+
+ ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
+ if (ret < 0) {
+ printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
+ "chrdev region (err %d)\n", -ret);
+ goto done;
+ }
+
+ ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);
+
+ if (IS_ERR(ipath_class)) {
+ ret = PTR_ERR(ipath_class);
+ printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
+ "device class (err %d)\n", -ret);
+ goto bail;
+ }
+
+ goto done;
+bail:
+ unregister_chrdev_region(dev, IPATH_NMINORS);
+done:
+ return ret;
+}
+
+static void user_cleanup(void)
+{
+ if (ipath_class) {
+ class_destroy(ipath_class);
+ ipath_class = NULL;
+ }
+
+ unregister_chrdev_region(dev, IPATH_NMINORS);
+}
+
+static atomic_t user_count = ATOMIC_INIT(0);
+static atomic_t user_setup = ATOMIC_INIT(0);
+
+int ipath_user_add(struct ipath_devdata *dd)
+{
+ char name[10];
+ int ret;
+
+ if (atomic_inc_return(&user_count) == 1) {
+ ret = user_init();
+ if (ret < 0) {
+ ipath_dev_err(dd, "Unable to set up user support: "
+ "error %d\n", -ret);
+ goto bail;
+ }
+ ret = ipath_diag_init();
+ if (ret < 0) {
+ ipath_dev_err(dd, "Unable to set up diag support: "
+ "error %d\n", -ret);
+ goto bail_sma;
+ }
+
+ ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
+ &wildcard_class_dev);
+ if (ret < 0) {
+ ipath_dev_err(dd, "Could not create wildcard "
+ "minor: error %d\n", -ret);
+ goto bail_diag;
+ }
+
+ atomic_set(&user_setup, 1);
+ }
+
+ snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);
+
+ ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
+ &dd->cdev, &dd->class_dev);
+ if (ret < 0)
+ ipath_dev_err(dd, "Could not create user minor %d, %s\n",
+ dd->ipath_unit + 1, name);
+
+ goto bail;
+
+bail_diag:
+ ipath_diag_cleanup();
+bail_sma:
+ user_cleanup();
+bail:
+ return ret;
+}
+
+void ipath_user_del(struct ipath_devdata *dd)
+{
+ cleanup_cdev(&dd->cdev, &dd->class_dev);
+
+ if (atomic_dec_return(&user_count) == 0) {
+ if (atomic_read(&user_setup) == 0)
+ goto bail;
+
+ cleanup_cdev(&wildcard_cdev, &wildcard_class_dev);
+ ipath_diag_cleanup();
+ user_cleanup();
+
+ atomic_set(&user_setup, 0);
+ }
+bail:
+ return;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
new file mode 100644
index 0000000000000..e274120567e1d
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -0,0 +1,605 @@
+/*
+ * Copyright (c) 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/version.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/pci.h>
+
+#include "ipath_kernel.h"
+
+#define IPATHFS_MAGIC 0x726a77
+
+static struct super_block *ipath_super;
+
+static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
+ int mode, struct file_operations *fops,
+ void *data)
+{
+ int error;
+ struct inode *inode = new_inode(dir->i_sb);
+
+ if (!inode) {
+ error = -EPERM;
+ goto bail;
+ }
+
+ inode->i_mode = mode;
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->u.generic_ip = data;
+ if ((mode & S_IFMT) == S_IFDIR) {
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_nlink++;
+ dir->i_nlink++;
+ }
+
+ inode->i_fop = fops;
+
+ d_instantiate(dentry, inode);
+ error = 0;
+
+bail:
+ return error;
+}
+
+static int create_file(const char *name, mode_t mode,
+ struct dentry *parent, struct dentry **dentry,
+ struct file_operations *fops, void *data)
+{
+ int error;
+
+ *dentry = NULL;
+ mutex_lock(&parent->d_inode->i_mutex);
+ *dentry = lookup_one_len(name, parent, strlen(name));
+ if (!IS_ERR(*dentry))
+ error = ipathfs_mknod(parent->d_inode, *dentry,
+ mode, fops, data);
+ else
+ error = PTR_ERR(*dentry);
+ mutex_unlock(&parent->d_inode->i_mutex);
+
+ return error;
+}
+
+static ssize_t atomic_stats_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, count, ppos, &ipath_stats,
+ sizeof ipath_stats);
+}
+
+static struct file_operations atomic_stats_ops = {
+ .read = atomic_stats_read,
+};
+
+#define NUM_COUNTERS (sizeof(struct infinipath_counters) / sizeof(u64))
+
+static ssize_t atomic_counters_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 counters[NUM_COUNTERS];
+ u16 i;
+ struct ipath_devdata *dd;
+
+ dd = file->f_dentry->d_inode->u.generic_ip;
+
+ for (i = 0; i < NUM_COUNTERS; i++)
+ counters[i] = ipath_snap_cntr(dd, i);
+
+ return simple_read_from_buffer(buf, count, ppos, counters,
+ sizeof counters);
+}
+
+static struct file_operations atomic_counters_ops = {
+ .read = atomic_counters_read,
+};
+
+static ssize_t atomic_node_info_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u32 nodeinfo[10];
+ struct ipath_devdata *dd;
+ u64 guid;
+
+ dd = file->f_dentry->d_inode->u.generic_ip;
+
+ guid = be64_to_cpu(dd->ipath_guid);
+
+ nodeinfo[0] = /* BaseVersion is SMA */
+ /* ClassVersion is SMA */
+ (1 << 8) /* NodeType */
+ | (1 << 0); /* NumPorts */
+ nodeinfo[1] = (u32) (guid >> 32);
+ nodeinfo[2] = (u32) (guid & 0xffffffff);
+ /* PortGUID == SystemImageGUID for us */
+ nodeinfo[3] = nodeinfo[1];
+ /* PortGUID == SystemImageGUID for us */
+ nodeinfo[4] = nodeinfo[2];
+ /* PortGUID == NodeGUID for us */
+ nodeinfo[5] = nodeinfo[3];
+ /* PortGUID == NodeGUID for us */
+ nodeinfo[6] = nodeinfo[4];
+ nodeinfo[7] = (4 << 16) /* we support 4 pkeys */
+ | (dd->ipath_deviceid << 0);
+ /* our chip version as 16 bits major, 16 bits minor */
+ nodeinfo[8] = dd->ipath_minrev | (dd->ipath_majrev << 16);
+ nodeinfo[9] = (dd->ipath_unit << 24) | (dd->ipath_vendorid << 0);
+
+ return simple_read_from_buffer(buf, count, ppos, nodeinfo,
+ sizeof nodeinfo);
+}
+
+static struct file_operations atomic_node_info_ops = {
+ .read = atomic_node_info_read,
+};
+
+static ssize_t atomic_port_info_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u32 portinfo[13];
+ u32 tmp, tmp2;
+ struct ipath_devdata *dd;
+
+ dd = file->f_dentry->d_inode->u.generic_ip;
+
+ /* zero everything, so we only have to initialize the non-zero fields. */
+ memset(portinfo, 0, sizeof portinfo);
+
+ /*
+ * Notimpl yet M_Key (64)
+ * Notimpl yet GID (64)
+ */
+
+ portinfo[4] = (dd->ipath_lid << 16);
+
+ /*
+ * Notimpl yet SMLID (should we store this in the driver, in case
+ * the SMA dies?). CapabilityMask is 0; we don't support any of these.
+ * DiagCode is 0; we don't store any diag info for now. Notimpl yet
+ * M_KeyLeasePeriod (we don't support M_Key).
+ */
+
+ /* LocalPortNum is whichever port number they ask for */
+ portinfo[7] = (dd->ipath_unit << 24)
+ /* LinkWidthEnabled */
+ | (2 << 16)
+ /* LinkWidthSupported (really 2, but not IB valid) */
+ | (3 << 8)
+ /* LinkWidthActive */
+ | (2 << 0);
+ tmp = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
+ tmp2 = 5;
+ if (tmp == IPATH_IBSTATE_INIT)
+ tmp = 2;
+ else if (tmp == IPATH_IBSTATE_ARM)
+ tmp = 3;
+ else if (tmp == IPATH_IBSTATE_ACTIVE)
+ tmp = 4;
+ else {
+ tmp = 0; /* down */
+ tmp2 = tmp & 0xf;
+ }
+
+ portinfo[8] = (1 << 28) /* LinkSpeedSupported */
+ | (tmp << 24) /* PortState */
+ | (tmp2 << 20) /* PortPhysicalState */
+ | (2 << 16)
+
+ /* LinkDownDefaultState */
+ /* M_KeyProtectBits == 0 */
+ /* NotImpl yet LMC == 0 (we can support all values) */
+ | (1 << 4) /* LinkSpeedActive */
+ | (1 << 0); /* LinkSpeedEnabled */
+ switch (dd->ipath_ibmtu) {
+ case 4096:
+ tmp = 5;
+ break;
+ case 2048:
+ tmp = 4;
+ break;
+ case 1024:
+ tmp = 3;
+ break;
+ case 512:
+ tmp = 2;
+ break;
+ case 256:
+ tmp = 1;
+ break;
+ default: /* oops, something is wrong */
+ ipath_dbg("Problem, ipath_ibmtu 0x%x not a valid IB MTU, "
+ "treat as 2048\n", dd->ipath_ibmtu);
+ tmp = 4;
+ break;
+ }
+ portinfo[9] = (tmp << 28)
+ /* NeighborMTU */
+ /* Notimpl MasterSMSL */
+ | (1 << 20)
+
+ /* VLCap */
+ /* Notimpl InitType (actually, an SMA decision) */
+ /* VLHighLimit is 0 (only one VL) */
+ ; /* VLArbitrationHighCap is 0 (only one VL) */
+ portinfo[10] = /* VLArbitrationLowCap is 0 (only one VL) */
+ /* InitTypeReply is SMA decision */
+ (5 << 16) /* MTUCap 4096 */
+ | (7 << 13) /* VLStallCount */
+ | (0x1f << 8) /* HOQLife */
+ | (1 << 4)
+
+ /* OperationalVLs 0 */
+ /* PartitionEnforcementInbound */
+ /* PartitionEnforcementOutbound not enforced */
+ /* FilterRawinbound not enforced */
+ ; /* FilterRawOutbound not enforced */
+ /* M_KeyViolations are not counted by hardware, SMA can count */
+ tmp = ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
+ /* P_KeyViolations are counted by hardware. */
+ portinfo[11] = ((tmp & 0xffff) << 0);
+ portinfo[12] =
+ /* Q_KeyViolations are not counted by hardware */
+ (1 << 8)
+
+ /* GUIDCap */
+ /* SubnetTimeOut handled by SMA */
+ /* RespTimeValue handled by SMA */
+ ;
+ /* LocalPhyErrors are programmed to max */
+ portinfo[12] |= (0xf << 20)
+ | (0xf << 16) /* OverRunErrors are programmed to max */
+ ;
+
+ return simple_read_from_buffer(buf, count, ppos, portinfo,
+ sizeof portinfo);
+}
+
+static struct file_operations atomic_port_info_ops = {
+ .read = atomic_port_info_read,
+};
+
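+/*
+ * The "flash" file exposes the EEPROM-backed struct ipath_flash as a
+ * simple byte-addressable window: reads and writes are clamped to the
+ * structure size and forwarded to ipath_eeprom_read()/_write().
+ */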
+static ssize_t flash_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ipath_devdata *dd;
+ ssize_t ret;
+ loff_t pos;
+ char *tmp;
+
+ pos = *ppos;
+
+ if (pos < 0) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (pos >= sizeof(struct ipath_flash)) {
+ ret = 0;
+ goto bail;
+ }
+
+ if (count > sizeof(struct ipath_flash) - pos)
+ count = sizeof(struct ipath_flash) - pos;
+
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ dd = file->f_dentry->d_inode->u.generic_ip;
+ if (ipath_eeprom_read(dd, pos, tmp, count)) {
+ ipath_dev_err(dd, "failed to read from flash\n");
+ ret = -ENXIO;
+ goto bail_tmp;
+ }
+
+ if (copy_to_user(buf, tmp, count)) {
+ ret = -EFAULT;
+ goto bail_tmp;
+ }
+
+ *ppos = pos + count;
+ ret = count;
+
+bail_tmp:
+ kfree(tmp);
+
+bail:
+ return ret;
+}
+
+static ssize_t flash_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ipath_devdata *dd;
+ ssize_t ret;
+ loff_t pos;
+ char *tmp;
+
+ pos = *ppos;
+
+ if (pos < 0) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (pos >= sizeof(struct ipath_flash)) {
+ ret = 0;
+ goto bail;
+ }
+
+ if (count > sizeof(struct ipath_flash) - pos)
+ count = sizeof(struct ipath_flash) - pos;
+
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ if (copy_from_user(tmp, buf, count)) {
+ ret = -EFAULT;
+ goto bail_tmp;
+ }
+
+ dd = file->f_dentry->d_inode->u.generic_ip;
+ if (ipath_eeprom_write(dd, pos, tmp, count)) {
+ ret = -ENXIO;
+ ipath_dev_err(dd, "failed to write to flash\n");
+ goto bail_tmp;
+ }
+
+ *ppos = pos + count;
+ ret = count;
+
+bail_tmp:
+ kfree(tmp);
+
+bail:
+ return ret;
+}
+
+static struct file_operations flash_ops = {
+ .read = flash_read,
+ .write = flash_write,
+};
+
+static int create_device_files(struct super_block *sb,
+ struct ipath_devdata *dd)
+{
+ struct dentry *dir, *tmp;
+ char unit[10];
+ int ret;
+
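+ /*
+ * Each device gets its own directory, named after its unit number
+ * ("00", "01", ...), containing the per-device files created below.
+ */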
+ snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
+ ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
+ (struct file_operations *) &simple_dir_operations,
+ dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
+ goto bail;
+ }
+
+ ret = create_file("atomic_counters", S_IFREG|S_IRUGO, dir, &tmp,
+ &atomic_counters_ops, dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/atomic_counters) "
+ "failed: %d\n", unit, ret);
+ goto bail;
+ }
+
+ ret = create_file("node_info", S_IFREG|S_IRUGO, dir, &tmp,
+ &atomic_node_info_ops, dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/node_info) "
+ "failed: %d\n", unit, ret);
+ goto bail;
+ }
+
+ ret = create_file("port_info", S_IFREG|S_IRUGO, dir, &tmp,
+ &atomic_port_info_ops, dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/port_info) "
+ "failed: %d\n", unit, ret);
+ goto bail;
+ }
+
+ ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
+ &flash_ops, dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/flash) "
+ "failed: %d\n", unit, ret);
+ goto bail;
+ }
+
+bail:
+ return ret;
+}
+
+static void remove_file(struct dentry *parent, char *name)
+{
+ struct dentry *tmp;
+
+ tmp = lookup_one_len(name, parent, strlen(name));
+
+ spin_lock(&dcache_lock);
+ spin_lock(&tmp->d_lock);
+ if (!(d_unhashed(tmp) && tmp->d_inode)) {
+ dget_locked(tmp);
+ __d_drop(tmp);
+ spin_unlock(&tmp->d_lock);
+ spin_unlock(&dcache_lock);
+ simple_unlink(parent->d_inode, tmp);
+ } else {
+ spin_unlock(&tmp->d_lock);
+ spin_unlock(&dcache_lock);
+ }
+}
+
+static int remove_device_files(struct super_block *sb,
+ struct ipath_devdata *dd)
+{
+ struct dentry *dir, *root;
+ char unit[10];
+ int ret;
+
+ root = dget(sb->s_root);
+ mutex_lock(&root->d_inode->i_mutex);
+ snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
+ dir = lookup_one_len(unit, root, strlen(unit));
+
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ printk(KERN_ERR "Lookup of %s failed\n", unit);
+ goto bail;
+ }
+
+ remove_file(dir, "flash");
+ remove_file(dir, "port_info");
+ remove_file(dir, "node_info");
+ remove_file(dir, "atomic_counters");
+ d_delete(dir);
+ ret = simple_rmdir(root->d_inode, dir);
+
+bail:
+ mutex_unlock(&root->d_inode->i_mutex);
+ dput(root);
+ return ret;
+}
+
+static int ipathfs_fill_super(struct super_block *sb, void *data,
+ int silent)
+{
+ struct ipath_devdata *dd, *tmp;
+ unsigned long flags;
+ int ret;
+
+ static struct tree_descr files[] = {
+ [1] = {"atomic_stats", &atomic_stats_ops, S_IRUGO},
+ {""},
+ };
+
+ ret = simple_fill_super(sb, IPATHFS_MAGIC, files);
+ if (ret) {
+ printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+
+ list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+ ret = create_device_files(sb, dd);
+ if (ret) {
+ deactivate_super(sb);
+ goto bail;
+ }
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+
+bail:
+ return ret;
+}
+
+static struct super_block *ipathfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data)
+{
+ ipath_super = get_sb_single(fs_type, flags, data,
+ ipathfs_fill_super);
+ return ipath_super;
+}
+
+static void ipathfs_kill_super(struct super_block *s)
+{
+ kill_litter_super(s);
+ ipath_super = NULL;
+}
+
+int ipathfs_add_device(struct ipath_devdata *dd)
+{
+ int ret;
+
+ if (ipath_super == NULL) {
+ ret = 0;
+ goto bail;
+ }
+
+ ret = create_device_files(ipath_super, dd);
+
+bail:
+ return ret;
+}
+
+int ipathfs_remove_device(struct ipath_devdata *dd)
+{
+ int ret;
+
+ if (ipath_super == NULL) {
+ ret = 0;
+ goto bail;
+ }
+
+ ret = remove_device_files(ipath_super, dd);
+
+bail:
+ return ret;
+}
+
+static struct file_system_type ipathfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ipathfs",
+ .get_sb = ipathfs_get_sb,
+ .kill_sb = ipathfs_kill_super,
+};
+
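+/*
+ * Once ipath_init_ipathfs() has registered the filesystem, it can be
+ * mounted from userspace in the usual way, e.g. (the mount point here
+ * is only an example):
+ *
+ *	mount -t ipathfs none /ipathfs
+ */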
+int __init ipath_init_ipathfs(void)
+{
+ return register_filesystem(&ipathfs_fs_type);
+}
+
+void __exit ipath_exit_ipathfs(void)
+{
+ unregister_filesystem(&ipathfs_fs_type);
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_ht400.c b/drivers/infiniband/hw/ipath/ipath_ht400.c
new file mode 100644
index 0000000000000..4652435998f35
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_ht400.c
@@ -0,0 +1,1586 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file contains all of the code that is specific to the InfiniPath
+ * HT-400 chip.
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "ipath_kernel.h"
+#include "ipath_registers.h"
+
+/*
+ * This lists the InfiniPath HT400 registers, in the actual chip layout.
+ * This structure should never be directly accessed.
+ *
+ * The names are in InterCap form because they're taken straight from
+ * the chip specification. Since they're only used in this file, they
+ * don't pollute the rest of the source.
+*/
+
+struct _infinipath_do_not_use_kernel_regs {
+ unsigned long long Revision;
+ unsigned long long Control;
+ unsigned long long PageAlign;
+ unsigned long long PortCnt;
+ unsigned long long DebugPortSelect;
+ unsigned long long DebugPort;
+ unsigned long long SendRegBase;
+ unsigned long long UserRegBase;
+ unsigned long long CounterRegBase;
+ unsigned long long Scratch;
+ unsigned long long ReservedMisc1;
+ unsigned long long InterruptConfig;
+ unsigned long long IntBlocked;
+ unsigned long long IntMask;
+ unsigned long long IntStatus;
+ unsigned long long IntClear;
+ unsigned long long ErrorMask;
+ unsigned long long ErrorStatus;
+ unsigned long long ErrorClear;
+ unsigned long long HwErrMask;
+ unsigned long long HwErrStatus;
+ unsigned long long HwErrClear;
+ unsigned long long HwDiagCtrl;
+ unsigned long long MDIO;
+ unsigned long long IBCStatus;
+ unsigned long long IBCCtrl;
+ unsigned long long ExtStatus;
+ unsigned long long ExtCtrl;
+ unsigned long long GPIOOut;
+ unsigned long long GPIOMask;
+ unsigned long long GPIOStatus;
+ unsigned long long GPIOClear;
+ unsigned long long RcvCtrl;
+ unsigned long long RcvBTHQP;
+ unsigned long long RcvHdrSize;
+ unsigned long long RcvHdrCnt;
+ unsigned long long RcvHdrEntSize;
+ unsigned long long RcvTIDBase;
+ unsigned long long RcvTIDCnt;
+ unsigned long long RcvEgrBase;
+ unsigned long long RcvEgrCnt;
+ unsigned long long RcvBufBase;
+ unsigned long long RcvBufSize;
+ unsigned long long RxIntMemBase;
+ unsigned long long RxIntMemSize;
+ unsigned long long RcvPartitionKey;
+ unsigned long long ReservedRcv[10];
+ unsigned long long SendCtrl;
+ unsigned long long SendPIOBufBase;
+ unsigned long long SendPIOSize;
+ unsigned long long SendPIOBufCnt;
+ unsigned long long SendPIOAvailAddr;
+ unsigned long long TxIntMemBase;
+ unsigned long long TxIntMemSize;
+ unsigned long long ReservedSend[9];
+ unsigned long long SendBufferError;
+ unsigned long long SendBufferErrorCONT1;
+ unsigned long long SendBufferErrorCONT2;
+ unsigned long long SendBufferErrorCONT3;
+ unsigned long long ReservedSBE[4];
+ unsigned long long RcvHdrAddr0;
+ unsigned long long RcvHdrAddr1;
+ unsigned long long RcvHdrAddr2;
+ unsigned long long RcvHdrAddr3;
+ unsigned long long RcvHdrAddr4;
+ unsigned long long RcvHdrAddr5;
+ unsigned long long RcvHdrAddr6;
+ unsigned long long RcvHdrAddr7;
+ unsigned long long RcvHdrAddr8;
+ unsigned long long ReservedRHA[7];
+ unsigned long long RcvHdrTailAddr0;
+ unsigned long long RcvHdrTailAddr1;
+ unsigned long long RcvHdrTailAddr2;
+ unsigned long long RcvHdrTailAddr3;
+ unsigned long long RcvHdrTailAddr4;
+ unsigned long long RcvHdrTailAddr5;
+ unsigned long long RcvHdrTailAddr6;
+ unsigned long long RcvHdrTailAddr7;
+ unsigned long long RcvHdrTailAddr8;
+ unsigned long long ReservedRHTA[7];
+ unsigned long long Sync; /* Software only */
+ unsigned long long Dump; /* Software only */
+ unsigned long long SimVer; /* Software only */
+ unsigned long long ReservedSW[5];
+ unsigned long long SerdesConfig0;
+ unsigned long long SerdesConfig1;
+ unsigned long long SerdesStatus;
+ unsigned long long XGXSConfig;
+ unsigned long long ReservedSW2[4];
+};
+
+#define IPATH_KREG_OFFSET(field) (offsetof(struct \
+ _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
+#define IPATH_CREG_OFFSET(field) (offsetof( \
+ struct infinipath_counters, field) / sizeof(u64))
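+/*
+ * Both offsets are expressed in units of 64-bit registers (byte offset
+ * divided by sizeof(u64)), which is how the ipath_read_kreg64() and
+ * ipath_read_creg32() accessors index the register space.
+ */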
+
+static const struct ipath_kregs ipath_ht_kregs = {
+ .kr_control = IPATH_KREG_OFFSET(Control),
+ .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
+ .kr_debugport = IPATH_KREG_OFFSET(DebugPort),
+ .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
+ .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
+ .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
+ .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
+ .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
+ .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
+ .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
+ .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
+ .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
+ .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
+ .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
+ .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
+ .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
+ .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
+ .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
+ .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
+ .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
+ .kr_intclear = IPATH_KREG_OFFSET(IntClear),
+ .kr_interruptconfig = IPATH_KREG_OFFSET(InterruptConfig),
+ .kr_intmask = IPATH_KREG_OFFSET(IntMask),
+ .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
+ .kr_mdio = IPATH_KREG_OFFSET(MDIO),
+ .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
+ .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
+ .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
+ .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
+ .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
+ .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
+ .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
+ .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
+ .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
+ .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
+ .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
+ .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
+ .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
+ .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
+ .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
+ .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
+ .kr_revision = IPATH_KREG_OFFSET(Revision),
+ .kr_scratch = IPATH_KREG_OFFSET(Scratch),
+ .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
+ .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
+ .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
+ .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
+ .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
+ .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
+ .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
+ .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
+ .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
+ .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
+ .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
+ .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
+ .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
+ .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
+ /*
+ * These should not be used directly via ipath_read_kreg64(),
+ * use them with ipath_read_kreg64_port(),
+ */
+ .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
+ .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0)
+};
+
+static const struct ipath_cregs ipath_ht_cregs = {
+ .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
+ .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
+ .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
+ .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
+ .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
+ .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
+ .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
+ .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
+ .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
+ .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
+ .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
+ .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
+ /* calc from Reg_CounterRegBase + offset */
+ .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
+ .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
+ .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
+ .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
+ .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
+ .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
+ .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
+ .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
+ .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
+ .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
+ .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
+ .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
+ .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
+ .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
+ .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
+ .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
+ .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
+ .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
+ .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
+ .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
+ .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
+};
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define INFINIPATH_I_RCVURG_MASK 0x1FF
+#define INFINIPATH_I_RCVAVAIL_MASK 0x1FF
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
+#define INFINIPATH_HWE_HTCMEMPARITYERR_MASK 0x3FFFFFULL
+#define INFINIPATH_HWE_HTCLNKABYTE0CRCERR 0x0000000000800000ULL
+#define INFINIPATH_HWE_HTCLNKABYTE1CRCERR 0x0000000001000000ULL
+#define INFINIPATH_HWE_HTCLNKBBYTE0CRCERR 0x0000000002000000ULL
+#define INFINIPATH_HWE_HTCLNKBBYTE1CRCERR 0x0000000004000000ULL
+#define INFINIPATH_HWE_HTCMISCERR4 0x0000000008000000ULL
+#define INFINIPATH_HWE_HTCMISCERR5 0x0000000010000000ULL
+#define INFINIPATH_HWE_HTCMISCERR6 0x0000000020000000ULL
+#define INFINIPATH_HWE_HTCMISCERR7 0x0000000040000000ULL
+#define INFINIPATH_HWE_HTCBUSTREQPARITYERR 0x0000000080000000ULL
+#define INFINIPATH_HWE_HTCBUSTRESPPARITYERR 0x0000000100000000ULL
+#define INFINIPATH_HWE_HTCBUSIREQPARITYERR 0x0000000200000000ULL
+#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
+#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
+#define INFINIPATH_HWE_HTBPLL_FBSLIP 0x0200000000000000ULL
+#define INFINIPATH_HWE_HTBPLL_RFSLIP 0x0400000000000000ULL
+#define INFINIPATH_HWE_HTAPLL_FBSLIP 0x0800000000000000ULL
+#define INFINIPATH_HWE_HTAPLL_RFSLIP 0x1000000000000000ULL
+#define INFINIPATH_HWE_SERDESPLLFAILED 0x2000000000000000ULL
+
+/* kr_extstatus bits */
+#define INFINIPATH_EXTS_FREQSEL 0x2
+#define INFINIPATH_EXTS_SERDESSEL 0x4
+#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
+#define INFINIPATH_EXTS_MEMBIST_CORRECT 0x0000000000008000
+
+/*
+ * masks and bits that are different in different chips, or present only
+ * in one
+ */
+static const ipath_err_t infinipath_hwe_htcmemparityerr_mask =
+ INFINIPATH_HWE_HTCMEMPARITYERR_MASK;
+static const ipath_err_t infinipath_hwe_htcmemparityerr_shift =
+ INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT;
+
+static const ipath_err_t infinipath_hwe_htclnkabyte0crcerr =
+ INFINIPATH_HWE_HTCLNKABYTE0CRCERR;
+static const ipath_err_t infinipath_hwe_htclnkabyte1crcerr =
+ INFINIPATH_HWE_HTCLNKABYTE1CRCERR;
+static const ipath_err_t infinipath_hwe_htclnkbbyte0crcerr =
+ INFINIPATH_HWE_HTCLNKBBYTE0CRCERR;
+static const ipath_err_t infinipath_hwe_htclnkbbyte1crcerr =
+ INFINIPATH_HWE_HTCLNKBBYTE1CRCERR;
+
+#define _IPATH_GPIO_SDA_NUM 1
+#define _IPATH_GPIO_SCL_NUM 0
+
+#define IPATH_GPIO_SDA \
+ (1ULL << (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
+#define IPATH_GPIO_SCL \
+ (1ULL << (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
+
+/* keep the code below somewhat more readable; not used elsewhere */
+#define _IPATH_HTLINK0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr | \
+ infinipath_hwe_htclnkabyte1crcerr)
+#define _IPATH_HTLINK1_CRCBITS (infinipath_hwe_htclnkbbyte0crcerr | \
+ infinipath_hwe_htclnkbbyte1crcerr)
+#define _IPATH_HTLANE0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr | \
+ infinipath_hwe_htclnkbbyte0crcerr)
+#define _IPATH_HTLANE1_CRCBITS (infinipath_hwe_htclnkabyte1crcerr | \
+ infinipath_hwe_htclnkbbyte1crcerr)
+
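+/*
+ * Decode the HT link/lane CRC error bits into a human-readable message,
+ * and mask any that fired so they are ignored until the driver is
+ * reloaded.
+ */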
+static void hwerr_crcbits(struct ipath_devdata *dd, ipath_err_t hwerrs,
+ char *msg, size_t msgl)
+{
+ char bitsmsg[64];
+ ipath_err_t crcbits = hwerrs &
+ (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS);
+ /* don't check if 8bit HT */
+ if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
+ crcbits &= ~infinipath_hwe_htclnkabyte1crcerr;
+ /* don't check if 8bit HT */
+ if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
+ crcbits &= ~infinipath_hwe_htclnkbbyte1crcerr;
+ /*
+ * we'll want to ignore link errors on a link that is
+ * not in use, if any. For now, complain about both
+ */
+ if (crcbits) {
+ u16 ctrl0, ctrl1;
+ snprintf(bitsmsg, sizeof bitsmsg,
+ "[HT%s lane %s CRC (%llx); ignore till reload]",
+ !(crcbits & _IPATH_HTLINK1_CRCBITS) ?
+ "0 (A)" : (!(crcbits & _IPATH_HTLINK0_CRCBITS)
+ ? "1 (B)" : "0+1 (A+B)"),
+ !(crcbits & _IPATH_HTLANE1_CRCBITS) ? "0"
+ : (!(crcbits & _IPATH_HTLANE0_CRCBITS) ? "1" :
+ "0+1"), (unsigned long long) crcbits);
+ strlcat(msg, bitsmsg, msgl);
+
+ /*
+ * print extra info for debugging. slave/primary
+ * config word 4, 8 (link control 0, 1)
+ */
+
+ if (pci_read_config_word(dd->pcidev,
+ dd->ipath_ht_slave_off + 0x4,
+ &ctrl0))
+ dev_info(&dd->pcidev->dev, "Couldn't read "
+ "linkctrl0 of slave/primary "
+ "config block\n");
+ else if (!(ctrl0 & 1 << 6))
+ /* not if EOC bit set */
+ ipath_dbg("HT linkctrl0 0x%x%s%s\n", ctrl0,
+ ((ctrl0 >> 8) & 7) ? " CRC" : "",
+ ((ctrl0 >> 4) & 1) ? "linkfail" :
+ "");
+ if (pci_read_config_word(dd->pcidev,
+ dd->ipath_ht_slave_off + 0x8,
+ &ctrl1))
+ dev_info(&dd->pcidev->dev, "Couldn't read "
+ "linkctrl1 of slave/primary "
+ "config block\n");
+ else if (!(ctrl1 & 1 << 6))
+ /* not if EOC bit set */
+ ipath_dbg("HT linkctrl1 0x%x%s%s\n", ctrl1,
+ ((ctrl1 >> 8) & 7) ? " CRC" : "",
+ ((ctrl1 >> 4) & 1) ? "linkfail" :
+ "");
+
+ /* disable until driver reloaded */
+ dd->ipath_hwerrmask &= ~crcbits;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+ dd->ipath_hwerrmask);
+ ipath_dbg("HT crc errs: %s\n", msg);
+ } else
+ ipath_dbg("ignoring HT crc errors 0x%llx, "
+ "not in use\n", (unsigned long long)
+ (hwerrs & (_IPATH_HTLINK0_CRCBITS |
+ _IPATH_HTLINK1_CRCBITS)));
+}
+
+/**
+ * ipath_ht_handle_hwerrors - display hardware errors
+ * @dd: the infinipath device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * We reuse the same message buffer as ipath_handle_errors() to
+ * avoid excessive stack use. Most hardware errors are
+ * catastrophic, but for right now we print them and continue.
+ */
+static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
+ size_t msgl)
+{
+ ipath_err_t hwerrs;
+ u32 bits, ctrl;
+ int isfatal = 0;
+ char bitsmsg[64];
+
+ hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
+
+ if (!hwerrs) {
+ ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
+ /*
+ * Better than printing confusing messages.
+ * This seems to be related to clearing the crc error, or
+ * the pll error, during init.
+ */
+ goto bail;
+ } else if (hwerrs == -1LL) {
+ ipath_dev_err(dd, "Read of hardware error status failed "
+ "(all bits set); ignoring\n");
+ goto bail;
+ }
+ ipath_stats.sps_hwerrs++;
+
+ /* Always clear the error status register, except MEMBISTFAIL,
+ * regardless of whether we continue or stop using the chip.
+ * We want that set so we know it failed, even across driver reload.
+ * We'll still ignore it in the hwerrmask. We do this partly for
+ * diagnostics, but also for support */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+ hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
+
+ hwerrs &= dd->ipath_hwerrmask;
+
+ /*
+ * make sure we get this much out, unless told to be quiet,
+ * or it's occurred within the last 5 seconds
+ */
+ if ((hwerrs & ~dd->ipath_lasthwerror) ||
+ (ipath_debug & __IPATH_VERBDBG))
+ dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
+ "(cleared)\n", (unsigned long long) hwerrs);
+ dd->ipath_lasthwerror |= hwerrs;
+
+ if (hwerrs & ~infinipath_hwe_bitsextant)
+ ipath_dev_err(dd, "hwerror interrupt with unknown errors "
+ "%llx set\n", (unsigned long long)
+ (hwerrs & ~infinipath_hwe_bitsextant));
+
+ ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
+ if (ctrl & INFINIPATH_C_FREEZEMODE) {
+ if (hwerrs) {
+ /*
+ * if any set that we aren't ignoring; only
+ * make the complaint once, in case it's stuck
+ * or recurring, and we get here multiple
+ * times.
+ */
+ if (dd->ipath_flags & IPATH_INITTED) {
+ ipath_dev_err(dd, "Fatal Error (freeze "
+ "mode), no longer usable\n");
+ isfatal = 1;
+ }
+ *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+ /* mark as having had error */
+ *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
+ /*
+ * mark as not usable, at a minimum until driver
+ * is reloaded, probably until reboot, since no
+ * other reset is possible.
+ */
+ dd->ipath_flags &= ~IPATH_INITTED;
+ } else {
+ ipath_dbg("Clearing freezemode on ignored hardware "
+ "error\n");
+ ctrl &= ~INFINIPATH_C_FREEZEMODE;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
+ ctrl);
+ }
+ }
+
+ *msg = '\0';
+
+ /*
+ * may someday want to decode into which bits are which
+ * functional area for parity errors, etc.
+ */
+ if (hwerrs & (infinipath_hwe_htcmemparityerr_mask
+ << INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT)) {
+ bits = (u32) ((hwerrs >>
+ INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) &
+ INFINIPATH_HWE_HTCMEMPARITYERR_MASK);
+ snprintf(bitsmsg, sizeof bitsmsg, "[HTC Parity Errs %x] ",
+ bits);
+ strlcat(msg, bitsmsg, msgl);
+ }
+ if (hwerrs & (INFINIPATH_HWE_RXEMEMPARITYERR_MASK
+ << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)) {
+ bits = (u32) ((hwerrs >>
+ INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) &
+ INFINIPATH_HWE_RXEMEMPARITYERR_MASK);
+ snprintf(bitsmsg, sizeof bitsmsg, "[RXE Parity Errs %x] ",
+ bits);
+ strlcat(msg, bitsmsg, msgl);
+ }
+ if (hwerrs & (INFINIPATH_HWE_TXEMEMPARITYERR_MASK
+ << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
+ bits = (u32) ((hwerrs >>
+ INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) &
+ INFINIPATH_HWE_TXEMEMPARITYERR_MASK);
+ snprintf(bitsmsg, sizeof bitsmsg, "[TXE Parity Errs %x] ",
+ bits);
+ strlcat(msg, bitsmsg, msgl);
+ }
+ if (hwerrs & INFINIPATH_HWE_IBCBUSTOSPCPARITYERR)
+ strlcat(msg, "[IB2IPATH Parity]", msgl);
+ if (hwerrs & INFINIPATH_HWE_IBCBUSFRSPCPARITYERR)
+ strlcat(msg, "[IPATH2IB Parity]", msgl);
+ if (hwerrs & INFINIPATH_HWE_HTCBUSIREQPARITYERR)
+ strlcat(msg, "[HTC Ireq Parity]", msgl);
+ if (hwerrs & INFINIPATH_HWE_HTCBUSTREQPARITYERR)
+ strlcat(msg, "[HTC Treq Parity]", msgl);
+ if (hwerrs & INFINIPATH_HWE_HTCBUSTRESPPARITYERR)
+ strlcat(msg, "[HTC Tresp Parity]", msgl);
+
+ if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS))
+ hwerr_crcbits(dd, hwerrs, msg, msgl);
+
+ if (hwerrs & INFINIPATH_HWE_HTCMISCERR5)
+ strlcat(msg, "[HT core Misc5]", msgl);
+ if (hwerrs & INFINIPATH_HWE_HTCMISCERR6)
+ strlcat(msg, "[HT core Misc6]", msgl);
+ if (hwerrs & INFINIPATH_HWE_HTCMISCERR7)
+ strlcat(msg, "[HT core Misc7]", msgl);
+ if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
+ strlcat(msg, "[Memory BIST test failed, HT-400 unusable]",
+ msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+ dd->ipath_hwerrmask);
+ }
+#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
+ INFINIPATH_HWE_COREPLL_RFSLIP | \
+ INFINIPATH_HWE_HTBPLL_FBSLIP | \
+ INFINIPATH_HWE_HTBPLL_RFSLIP | \
+ INFINIPATH_HWE_HTAPLL_FBSLIP | \
+ INFINIPATH_HWE_HTAPLL_RFSLIP)
+
+ if (hwerrs & _IPATH_PLL_FAIL) {
+ snprintf(bitsmsg, sizeof bitsmsg,
+ "[PLL failed (%llx), HT-400 unusable]",
+ (unsigned long long) (hwerrs & _IPATH_PLL_FAIL));
+ strlcat(msg, bitsmsg, msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+ dd->ipath_hwerrmask);
+ }
+
+ if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
+ /*
+ * If it occurs, it is left masked since the external
+ * interface is unused.
+ */
+ dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+ dd->ipath_hwerrmask);
+ }
+
+ if (hwerrs & INFINIPATH_HWE_RXDSYNCMEMPARITYERR)
+ strlcat(msg, "[Rx Dsync]", msgl);
+ if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED)
+ strlcat(msg, "[SerDes PLL]", msgl);
+
+ ipath_dev_err(dd, "%s hardware error\n", msg);
+ if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
+ /*
+ * for status file; if no trailing brace is copied,
+ * we'll know it was truncated.
+ */
+ snprintf(dd->ipath_freezemsg,
+ dd->ipath_freezelen, "{%s}", msg);
+
+bail:;
+}
+
+/**
+ * ipath_ht_boardname - fill in the board name
+ * @dd: the infinipath device
+ * @name: the output buffer
+ * @namelen: the size of the output buffer
+ *
+ * fill in the board name, based on the board revision register
+ */
+static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
+ size_t namelen)
+{
+ char *n = NULL;
+ u8 boardrev = dd->ipath_boardrev;
+ int ret;
+
+ switch (boardrev) {
+ case 4: /* Ponderosa is one of the bringup boards */
+ n = "Ponderosa";
+ break;
+ case 5: /* HT-460 original production board */
+ n = "InfiniPath_HT-460";
+ break;
+ case 6:
+ n = "OEM_Board_3";
+ break;
+ case 7:
+ /* HT-460 small form factor production board */
+ n = "InfiniPath_HT-465";
+ break;
+ case 8:
+ n = "LS/X-1";
+ break;
+ case 9: /* Comstock bringup test board */
+ n = "Comstock";
+ break;
+ case 10:
+ n = "OEM_Board_2";
+ break;
+ case 11:
+ n = "InfiniPath_HT-470";
+ break;
+ case 12:
+ n = "OEM_Board_4";
+ break;
+ default: /* don't know, just print the number */
+ ipath_dev_err(dd, "Don't yet know about board "
+ "with ID %u\n", boardrev);
+ snprintf(name, namelen, "Unknown_InfiniPath_HT-4xx_%u",
+ boardrev);
+ break;
+ }
+ if (n)
+ snprintf(name, namelen, "%s", n);
+
+ if (dd->ipath_majrev != 3 || dd->ipath_minrev != 2) {
+ /*
+ * This version of the driver only supports the HT-400
+ * Rev 3.2
+ */
+ ipath_dev_err(dd,
+ "Unsupported HT-400 revision %u.%u!\n",
+ dd->ipath_majrev, dd->ipath_minrev);
+ ret = 1;
+ goto bail;
+ }
+ /*
+ * pkt/word counters are 32 bit, and therefore wrap fast enough
+ * that we snapshot them from a timer, and maintain 64 bit shadow
+ * copies
+ */
+ dd->ipath_flags |= IPATH_32BITCOUNTERS;
+ if (dd->ipath_htspeed != 800)
+ ipath_dev_err(dd,
+ "Incorrectly configured for HT @ %uMHz\n",
+ dd->ipath_htspeed);
+ if (dd->ipath_boardrev == 7 || dd->ipath_boardrev == 11 ||
+ dd->ipath_boardrev == 6)
+ dd->ipath_flags |= IPATH_GPIO_INTR;
+ else
+ dd->ipath_flags |= IPATH_POLL_RX_INTR;
+ if (dd->ipath_boardrev == 8) { /* LS/X-1 */
+ u64 val;
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
+ if (val & INFINIPATH_EXTS_SERDESSEL) {
+ /*
+ * hardware disabled
+ *
+ * This means that the chip is hardware disabled,
+ * and will not be able to bring up the link,
+ * in any case. We special case this and abort
+ * early, to avoid later messages. We also set
+ * the DISABLED status bit
+ */
+ ipath_dbg("Unit %u is hardware-disabled\n",
+ dd->ipath_unit);
+ *dd->ipath_statusp |= IPATH_STATUS_DISABLED;
+ /* this value is handled differently */
+ ret = 2;
+ goto bail;
+ }
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static void ipath_check_htlink(struct ipath_devdata *dd)
+{
+ u8 linkerr, link_off, i;
+
+ for (i = 0; i < 2; i++) {
+ link_off = dd->ipath_ht_slave_off + i * 4 + 0xd;
+ if (pci_read_config_byte(dd->pcidev, link_off, &linkerr))
+ dev_info(&dd->pcidev->dev, "Couldn't read "
+ "linkerror%d of HT slave/primary block\n",
+ i);
+ else if (linkerr & 0xf0) {
+ ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
+ "clearing\n", linkerr >> 4, i);
+ /*
+ * writing the linkerr bits that are set should
+ * clear them
+ */
+ if (pci_write_config_byte(dd->pcidev, link_off,
+ linkerr))
+ ipath_dbg("Failed write to clear HT "
+ "linkerror%d\n", i);
+ if (pci_read_config_byte(dd->pcidev, link_off,
+ &linkerr))
+ dev_info(&dd->pcidev->dev,
+ "Couldn't reread linkerror%d of "
+ "HT slave/primary block\n", i);
+ else if (linkerr & 0xf0)
+ dev_info(&dd->pcidev->dev,
+ "HT linkerror%d bits 0x%x "
+ "couldn't be cleared\n",
+ i, linkerr >> 4);
+ }
+ }
+}
+
+static int ipath_setup_ht_reset(struct ipath_devdata *dd)
+{
+ ipath_dbg("No reset possible for HT-400\n");
+ return 0;
+}
+
+#define HT_CAPABILITY_ID 0x08 /* HT capabilities not defined in kernel */
+#define HT_INTR_DISC_CONFIG 0x80 /* HT interrupt and discovery cap */
+#define HT_INTR_REG_INDEX 2 /* intconfig requires indirect accesses */
+
+/*
+ * Bits 13-15 of command==0 is slave/primary block. Clear any HT CRC
+ * errors. We only bother to do this at load time, because it's OK if
+ * it happened before we were loaded (first time after boot/reset),
+ * but any time after that, it's fatal anyway. Also need to not check
+ * for upper byte errors if we are in 8 bit mode, so figure out
+ * our width. For now, at least, also complain if it's 8 bit.
+ */
+static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
+ int pos, u8 cap_type)
+{
+ u8 linkwidth = 0, linkerr, link_a_b_off, link_off;
+ u16 linkctrl = 0;
+ int i;
+
+ dd->ipath_ht_slave_off = pos;
+ /* command word, master_host bit */
+ /* master host || slave */
+ if ((cap_type >> 2) & 1)
+ link_a_b_off = 4;
+ else
+ link_a_b_off = 0;
+ ipath_cdbg(VERBOSE, "HT%u (Link %c) connected to processor\n",
+ link_a_b_off ? 1 : 0,
+ link_a_b_off ? 'B' : 'A');
+
+ link_a_b_off += pos;
+
+ /*
+ * check both link control registers; clear both HT CRC sets if
+ * necessary.
+ */
+ for (i = 0; i < 2; i++) {
+ link_off = pos + i * 4 + 0x4;
+ if (pci_read_config_word(pdev, link_off, &linkctrl))
+ ipath_dev_err(dd, "Couldn't read HT link control%d "
+ "register\n", i);
+ else if (linkctrl & (0xf << 8)) {
+ ipath_cdbg(VERBOSE, "Clear linkctrl%d CRC Error "
+ "bits %x\n", i, linkctrl & (0xf << 8));
+ /*
+ * now write them back to clear the error.
+ */
+ pci_write_config_byte(pdev, link_off,
+ linkctrl & (0xf << 8));
+ }
+ }
+
+ /*
+ * As with HT CRC bits, same for protocol errors that might occur
+ * during boot.
+ */
+ for (i = 0; i < 2; i++) {
+ link_off = pos + i * 4 + 0xd;
+ if (pci_read_config_byte(pdev, link_off, &linkerr))
+ dev_info(&pdev->dev, "Couldn't read linkerror%d "
+ "of HT slave/primary block\n", i);
+ else if (linkerr & 0xf0) {
+ ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
+ "clearing\n", linkerr >> 4, i);
+ /*
+ * writing the linkerr bits that are set will clear
+ * them
+ */
+ if (pci_write_config_byte
+ (pdev, link_off, linkerr))
+ ipath_dbg("Failed write to clear HT "
+ "linkerror%d\n", i);
+ if (pci_read_config_byte(pdev, link_off, &linkerr))
+ dev_info(&pdev->dev, "Couldn't reread "
+ "linkerror%d of HT slave/primary "
+ "block\n", i);
+ else if (linkerr & 0xf0)
+ dev_info(&pdev->dev, "HT linkerror%d bits "
+ "0x%x couldn't be cleared\n",
+ i, linkerr >> 4);
+ }
+ }
+
+ /*
+ * this is just for our link to the host, not devices connected
+ * through tunnel.
+ */
+
+ if (pci_read_config_byte(pdev, link_a_b_off + 7, &linkwidth))
+ ipath_dev_err(dd, "Couldn't read HT link width "
+ "config register\n");
+ else {
+ u32 width;
+ switch (linkwidth & 7) {
+ case 5:
+ width = 4;
+ break;
+ case 4:
+ width = 2;
+ break;
+ case 3:
+ width = 32;
+ break;
+ case 1:
+ width = 16;
+ break;
+ case 0:
+ default: /* if wrong, assume 8 bit */
+ width = 8;
+ break;
+ }
+
+ dd->ipath_htwidth = width;
+
+ if (linkwidth != 0x11) {
+ ipath_dev_err(dd, "Not configured for 16 bit HT "
+ "(%x)\n", linkwidth);
+ if (!(linkwidth & 0xf)) {
+ ipath_dbg("Will ignore HT lane1 errors\n");
+ dd->ipath_flags |= IPATH_8BIT_IN_HT0;
+ }
+ }
+ }
+
+ /*
+ * this is just for our link to the host, not devices connected
+ * through tunnel.
+ */
+ if (pci_read_config_byte(pdev, link_a_b_off + 0xd, &linkwidth))
+ ipath_dev_err(dd, "Couldn't read HT link frequency "
+ "config register\n");
+ else {
+ u32 speed;
+ switch (linkwidth & 0xf) {
+ case 6:
+ speed = 1000;
+ break;
+ case 5:
+ speed = 800;
+ break;
+ case 4:
+ speed = 600;
+ break;
+ case 3:
+ speed = 500;
+ break;
+ case 2:
+ speed = 400;
+ break;
+ case 1:
+ speed = 300;
+ break;
+ default:
+ /*
+ * assume reserved and vendor-specific are 200...
+ */
+ case 0:
+ speed = 200;
+ break;
+ }
+ dd->ipath_htspeed = speed;
+ }
+}
+
+static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev,
+ int pos)
+{
+ u32 int_handler_addr_lower;
+ u32 int_handler_addr_upper;
+ u64 ihandler;
+ u32 intvec;
+
+ /* use indirection register to get the intr handler */
+ pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x10);
+ pci_read_config_dword(pdev, pos + 4, &int_handler_addr_lower);
+ pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x11);
+ pci_read_config_dword(pdev, pos + 4, &int_handler_addr_upper);
+
+ ihandler = (u64) int_handler_addr_lower |
+ ((u64) int_handler_addr_upper << 32);
+
+ /*
+ * kernels with CONFIG_PCI_MSI set the vector in the irq field of
+ * struct pci_device, so we use that to program the HT-400 internal
+ * interrupt register (not config space) with that value. The BIOS
+ * must still have done the basic MSI setup.
+ */
+ intvec = pdev->irq;
+ /*
+ * clear any vector bits there; normally not set but we'll overload
+ * this for some debug purposes (setting the HTC debug register
+ * value from software, rather than GPIOs), so it might be set on a
+ * driver reload.
+ */
+ ihandler &= ~0xff0000;
+ /* x86 vector goes in intrinfo[23:16] */
+ ihandler |= intvec << 16;
+ ipath_cdbg(VERBOSE, "ihandler lower %x, upper %x, intvec %x, "
+ "interruptconfig %llx\n", int_handler_addr_lower,
+ int_handler_addr_upper, intvec,
+ (unsigned long long) ihandler);
+
+ /* can't program yet, so save for interrupt setup */
+ dd->ipath_intconfig = ihandler;
+ /* keep going, so we find link control stuff also */
+
+ return ihandler != 0;
+}
+
+/**
+ * ipath_setup_ht_config - setup the interruptconfig register
+ * @dd: the infinipath device
+ * @pdev: the PCI device
+ *
+ * setup the interruptconfig register from the HT config info.
+ * Also clear CRC errors in HT linkcontrol, if necessary.
+ * This is done only for the real hardware. It is done before
+ * chip address space is initted, so can't touch infinipath registers
+ */
+static int ipath_setup_ht_config(struct ipath_devdata *dd,
+ struct pci_dev *pdev)
+{
+ int pos, ret = 0;
+ int ihandler = 0;
+
+ /*
+ * Read the capability info to find the interrupt info, and also
+ * handle clearing CRC errors in linkctrl register if necessary. We
+ * do this early, before we ever enable errors or hardware errors,
+ * mostly to avoid causing the chip to enter freeze mode.
+ */
+ pos = pci_find_capability(pdev, HT_CAPABILITY_ID);
+ if (!pos) {
+ ipath_dev_err(dd, "Couldn't find HyperTransport "
+ "capability; no interrupts\n");
+ ret = -ENODEV;
+ goto bail;
+ }
+ do {
+ u8 cap_type;
+
+ /* the HT capability type byte is 3 bytes after the
+ * capability byte.
+ */
+ if (pci_read_config_byte(pdev, pos + 3, &cap_type)) {
+ dev_info(&pdev->dev, "Couldn't read config "
+ "command @ %d\n", pos);
+ continue;
+ }
+ if (!(cap_type & 0xE0))
+ slave_or_pri_blk(dd, pdev, pos, cap_type);
+ else if (cap_type == HT_INTR_DISC_CONFIG)
+ ihandler = set_int_handler(dd, pdev, pos);
+ } while ((pos = pci_find_next_capability(pdev, pos,
+ HT_CAPABILITY_ID)));
+
+ if (!ihandler) {
+ ipath_dev_err(dd, "Couldn't find interrupt handler in "
+ "config space\n");
+ ret = -ENODEV;
+ }
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_setup_ht_cleanup - clean up any per-chip chip-specific stuff
+ * @dd: the infinipath device
+ *
+ * Called during driver unload.
+ * This is currently a nop for the HT-400, not for all chips
+ */
+static void ipath_setup_ht_cleanup(struct ipath_devdata *dd)
+{
+}
+
+/**
+ * ipath_setup_ht_setextled - set the state of the two external LEDs
+ * @dd: the infinipath device
+ * @lst: the L state
+ * @ltst: the LT state
+ *
+ * Set the state of the two external LEDs, to indicate physical and
+ * logical state of IB link. For this chip (at least with recommended
+ * board pinouts), LED1 is Green (physical state), and LED2 is Yellow
+ * (logical state)
+ *
+ * Note: We try to match the Mellanox HCA LED behavior as best
+ * we can. Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate. That's
+ * visible overhead, so not something we will do.
+ *
+ */
+static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
+ u64 lst, u64 ltst)
+{
+ u64 extctl;
+
+ /* the diags use the LED to indicate diag info, so we leave
+ * the external LED alone when the diags are running */
+ if (ipath_diag_inuse)
+ return;
+
+ /*
+ * start by setting both LED control bits to off, then turn
+ * on the appropriate bit(s).
+ */
+ if (dd->ipath_boardrev == 8) { /* LS/X-1 uses different pins */
+ /*
+ * major difference is that INFINIPATH_EXTC_LEDGBLERR_OFF
+ * is inverted, because it is normally used to indicate
+ * a hardware fault at reset, if there were errors
+ */
+ extctl = (dd->ipath_extctrl & ~INFINIPATH_EXTC_LEDGBLOK_ON)
+ | INFINIPATH_EXTC_LEDGBLERR_OFF;
+ if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
+ extctl &= ~INFINIPATH_EXTC_LEDGBLERR_OFF;
+ if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
+ extctl |= INFINIPATH_EXTC_LEDGBLOK_ON;
+ }
+ else {
+ extctl = dd->ipath_extctrl &
+ ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
+ INFINIPATH_EXTC_LED2PRIPORT_ON);
+ if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
+ extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
+ if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
+ extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
+ }
+ dd->ipath_extctrl = extctl;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
+}
+
+static void ipath_init_ht_variables(void)
+{
+ ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
+ ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
+ ipath_gpio_sda = IPATH_GPIO_SDA;
+ ipath_gpio_scl = IPATH_GPIO_SCL;
+
+ infinipath_i_bitsextant =
+ (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
+ (INFINIPATH_I_RCVAVAIL_MASK <<
+ INFINIPATH_I_RCVAVAIL_SHIFT) |
+ INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
+ INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
+
+ infinipath_e_bitsextant =
+ INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
+ INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
+ INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
+ INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
+ INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
+ INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
+ INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+ INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
+ INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
+ INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
+ INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
+ INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
+ INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
+ INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
+ INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
+ INFINIPATH_E_HARDWARE;
+
+ infinipath_hwe_bitsextant =
+ (INFINIPATH_HWE_HTCMEMPARITYERR_MASK <<
+ INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) |
+ (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
+ INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
+ (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
+ INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
+ INFINIPATH_HWE_HTCLNKABYTE0CRCERR |
+ INFINIPATH_HWE_HTCLNKABYTE1CRCERR |
+ INFINIPATH_HWE_HTCLNKBBYTE0CRCERR |
+ INFINIPATH_HWE_HTCLNKBBYTE1CRCERR |
+ INFINIPATH_HWE_HTCMISCERR4 |
+ INFINIPATH_HWE_HTCMISCERR5 | INFINIPATH_HWE_HTCMISCERR6 |
+ INFINIPATH_HWE_HTCMISCERR7 |
+ INFINIPATH_HWE_HTCBUSTREQPARITYERR |
+ INFINIPATH_HWE_HTCBUSTRESPPARITYERR |
+ INFINIPATH_HWE_HTCBUSIREQPARITYERR |
+ INFINIPATH_HWE_RXDSYNCMEMPARITYERR |
+ INFINIPATH_HWE_MEMBISTFAILED |
+ INFINIPATH_HWE_COREPLL_FBSLIP |
+ INFINIPATH_HWE_COREPLL_RFSLIP |
+ INFINIPATH_HWE_HTBPLL_FBSLIP |
+ INFINIPATH_HWE_HTBPLL_RFSLIP |
+ INFINIPATH_HWE_HTAPLL_FBSLIP |
+ INFINIPATH_HWE_HTAPLL_RFSLIP |
+ INFINIPATH_HWE_SERDESPLLFAILED |
+ INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
+ INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
+
+ infinipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
+ infinipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
+}
+
+/**
+ * ipath_ht_init_hwerrors - enable hardware errors
+ * @dd: the infinipath device
+ *
+ * now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
+{
+ ipath_err_t val;
+ u64 extsval;
+
+ extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
+
+ if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
+ ipath_dev_err(dd, "MemBIST did not complete!\n");
+
+ ipath_check_htlink(dd);
+
+ /* barring bugs, all hwerrors become interrupts */
+ val = -1LL;
+ /* don't look at crc lane1 if 8 bit */
+ if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
+ val &= ~infinipath_hwe_htclnkabyte1crcerr;
+ /* don't look at crc lane1 if 8 bit */
+ if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
+ val &= ~infinipath_hwe_htclnkbbyte1crcerr;
+
+ /*
+ * disable RXDSYNCMEMPARITY because external serdes is unused,
+ * and therefore the logic will never be used or initialized,
+ * and uninitialized state will normally result in this error
+ * being asserted. Similarly for the external serdes pll
+ * lock signal.
+ */
+ val &= ~(INFINIPATH_HWE_SERDESPLLFAILED |
+ INFINIPATH_HWE_RXDSYNCMEMPARITYERR);
+
+ /*
+ * Disable MISCERR4 because of an inversion in the HT core
+ * logic checking for errors that cause this bit to be set.
+ * The errata can also cause the protocol error bit to be set
+ * in the HT config space linkerror register(s).
+ */
+ val &= ~INFINIPATH_HWE_HTCMISCERR4;
+
+ /*
+ * PLL ignored because MDIO interface has a logic problem
+ * for reads, on Comstock and Ponderosa. BRINGUP
+ */
+ if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9)
+ val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
+ dd->ipath_hwerrmask = val;
+}
+
+/**
+ * ipath_ht_bringup_serdes - bring up the serdes
+ * @dd: the infinipath device
+ */
+static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
+{
+ u64 val, config1;
+ int ret = 0, change = 0;
+
+ ipath_dbg("Trying to bringup serdes\n");
+
+ if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
+ INFINIPATH_HWE_SERDESPLLFAILED)
+ {
+ ipath_dbg("At start, serdes PLL failed bit set in "
+ "hwerrstatus, clearing and continuing\n");
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+ INFINIPATH_HWE_SERDESPLLFAILED);
+ }
+
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
+ config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
+
+ ipath_cdbg(VERBOSE, "Initial serdes status is config0=%llx "
+ "config1=%llx, sstatus=%llx xgxs %llx\n",
+ (unsigned long long) val, (unsigned long long) config1,
+ (unsigned long long)
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
+ (unsigned long long)
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
+
+ /* force reset on */
+ val |= INFINIPATH_SERDC0_RESET_PLL
+ /* | INFINIPATH_SERDC0_RESET_MASK */
+ ;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
+ udelay(15); /* need pll reset set at least for a bit */
+
+ if (val & INFINIPATH_SERDC0_RESET_PLL) {
+ u64 val2 = val &= ~INFINIPATH_SERDC0_RESET_PLL;
+ /* set lane resets, and tx idle, during pll reset */
+ val2 |= INFINIPATH_SERDC0_RESET_MASK |
+ INFINIPATH_SERDC0_TXIDLE;
+ ipath_cdbg(VERBOSE, "Clearing serdes PLL reset (writing "
+ "%llx)\n", (unsigned long long) val2);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
+ val2);
+ /*
+ * be sure chip saw it
+ */
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ /*
+ * need pll reset clear at least 11 usec before lane
+ * resets cleared; give it a few more
+ */
+ udelay(15);
+ val = val2; /* for check below */
+ }
+
+ if (val & (INFINIPATH_SERDC0_RESET_PLL |
+ INFINIPATH_SERDC0_RESET_MASK |
+ INFINIPATH_SERDC0_TXIDLE)) {
+ val &= ~(INFINIPATH_SERDC0_RESET_PLL |
+ INFINIPATH_SERDC0_RESET_MASK |
+ INFINIPATH_SERDC0_TXIDLE);
+ /* clear them */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
+ val);
+ }
+
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
+ if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
+ INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
+ val &= ~(INFINIPATH_XGXS_MDIOADDR_MASK <<
+ INFINIPATH_XGXS_MDIOADDR_SHIFT);
+ /*
+ * we use address 3
+ */
+ val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
+ change = 1;
+ }
+ if (val & INFINIPATH_XGXS_RESET) {
+ /* normally true after boot */
+ val &= ~INFINIPATH_XGXS_RESET;
+ change = 1;
+ }
+ if (change)
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
+
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
+
+ /* clear current and de-emphasis bits */
+ config1 &= ~0x0ffffffff00ULL;
+ /* set current to 20ma */
+ config1 |= 0x00000000000ULL;
+ /* set de-emphasis to -5.68dB */
+ config1 |= 0x0cccc000000ULL;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
+
+ ipath_cdbg(VERBOSE, "After setup: serdes status is config0=%llx "
+ "config1=%llx, sstatus=%llx xgxs %llx\n",
+ (unsigned long long) val, (unsigned long long) config1,
+ (unsigned long long)
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
+ (unsigned long long)
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
+
+ if (!ipath_waitfor_mdio_cmdready(dd)) {
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_mdio,
+ ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
+ IPATH_MDIO_CTRL_XGXS_REG_8,
+ 0));
+ if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
+ IPATH_MDIO_DATAVALID, &val))
+ ipath_dbg("Never got MDIO data for XGXS status "
+ "read\n");
+ else
+ ipath_cdbg(VERBOSE, "MDIO Read reg8, "
+ "'bank' 31 %x\n", (u32) val);
+ } else
+ ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
+
+ return ret; /* for now, say we always succeeded */
+}
+
+/**
+ * ipath_ht_quiet_serdes - set serdes to txidle
+ * @dd: the infinipath device
+ * driver is being unloaded
+ */
+static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
+{
+ u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
+
+ val |= INFINIPATH_SERDC0_TXIDLE;
+ ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
+ (unsigned long long) val);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
+}
+
+static int ipath_ht_intconfig(struct ipath_devdata *dd)
+{
+ int ret;
+
+ if (!dd->ipath_intconfig) {
+ ipath_dev_err(dd, "No interrupts enabled, couldn't setup "
+ "interrupt address\n");
+ ret = 1;
+ goto bail;
+ }
+
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig,
+ dd->ipath_intconfig); /* interrupt address */
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_ht_put_tid - write a TID to the chip
+ * @dd: the infinipath device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: 0 for eager, 1 for expected
+ * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for special locking etc.
+ * It's used for both the full cleanup on exit, as well as the normal
+ * setup and teardown.
+ */
+static void ipath_ht_put_tid(struct ipath_devdata *dd,
+ u64 __iomem *tidptr, u32 type,
+ unsigned long pa)
+{
+ if (pa != dd->ipath_tidinvalid) {
+ if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) {
+ dev_info(&dd->pcidev->dev,
+ "physaddr %lx has more than "
+ "40 bits, using only 40!!!\n", pa);
+ pa &= INFINIPATH_RT_ADDR_MASK;
+ }
+ if (type == 0)
+ pa |= dd->ipath_tidtemplate;
+ else {
+ /* in words (fixed, full page). */
+ u64 lenvalid = PAGE_SIZE >> 2;
+ lenvalid <<= INFINIPATH_RT_BUFSIZE_SHIFT;
+ pa |= lenvalid | INFINIPATH_RT_VALID;
+ }
+ }
+ if (dd->ipath_kregbase)
+ writeq(pa, tidptr);
+}
+
+/**
+ * ipath_ht_clear_tids - clear all TID entries for a port, expected and eager
+ * @dd: the infinipath device
+ * @port: the port
+ *
+ * Used from ipath_close(), and at chip initialization.
+ */
+static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
+{
+ u64 __iomem *tidbase;
+ int i;
+
+ if (!dd->ipath_kregbase)
+ return;
+
+ ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
+
+ /*
+ * need to invalidate all of the expected TID entries for this
+ * port, so we don't have valid entries that might somehow get
+ * used (early in next use of this port, or through some bug)
+ */
+ tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
+ dd->ipath_rcvtidbase +
+ port * dd->ipath_rcvtidcnt *
+ sizeof(*tidbase));
+ for (i = 0; i < dd->ipath_rcvtidcnt; i++)
+ ipath_ht_put_tid(dd, &tidbase[i], 1, dd->ipath_tidinvalid);
+
+ tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
+ dd->ipath_rcvegrbase +
+ port * dd->ipath_rcvegrcnt *
+ sizeof(*tidbase));
+
+ for (i = 0; i < dd->ipath_rcvegrcnt; i++)
+ ipath_ht_put_tid(dd, &tidbase[i], 0, dd->ipath_tidinvalid);
+}
+
+/**
+ * ipath_ht_tidtemplate - setup constants for TID updates
+ * @dd: the infinipath device
+ *
+ * We setup stuff that we use a lot, to avoid calculating each time
+ */
+static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
+{
+ dd->ipath_tidtemplate = dd->ipath_ibmaxlen >> 2;
+ dd->ipath_tidtemplate <<= INFINIPATH_RT_BUFSIZE_SHIFT;
+ dd->ipath_tidtemplate |= INFINIPATH_RT_VALID;
+
+ /*
+ * work around chip errata bug 7358, by marking invalid tids
+ * as having max length
+ */
+ dd->ipath_tidinvalid = (-1LL & INFINIPATH_RT_BUFSIZE_MASK) <<
+ INFINIPATH_RT_BUFSIZE_SHIFT;
+}
+
+static int ipath_ht_early_init(struct ipath_devdata *dd)
+{
+ u32 __iomem *piobuf;
+ u32 pioincr, val32, egrsize;
+ int i;
+
+ /*
+ * one cache line; long IB headers will spill over into received
+ * buffer
+ */
+ dd->ipath_rcvhdrentsize = 16;
+ dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
+
+ /*
+ * For HT-400, we allocate a somewhat overly large eager buffer,
+ * such that we can guarantee that we can receive the largest
+ * packet that we can send out. To truly support a 4KB MTU,
+ * we need to bump this to a large value. To date, other than
+ * testing, we have never encountered an HCA that can really
+ * send 4KB MTU packets, so we do not handle that (we'll get
+ * error interrupts if we ever see one).
+ */
+ dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
+ egrsize = dd->ipath_rcvegrbufsize;
+
+ /*
+ * the min() check here is currently a nop, but it may not
+ * always be, depending on just how we do ipath_rcvegrbufsize
+ */
+ dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
+ dd->ipath_rcvegrbufsize);
+ dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
+ ipath_ht_tidtemplate(dd);
+
+ /*
+ * zero all the TID entries at startup. We do this for sanity,
+ * in case of a previous driver crash of some kind, and also
+ * because the chip powers up with these memories in an unknown
+ * state. Use portcnt, not cfgports, since this is for the
+ * full chip, not for current (possibly different) configuration
+ * value.
+ * Chip Errata bug 6447
+ */
+ for (val32 = 0; val32 < dd->ipath_portcnt; val32++)
+ ipath_ht_clear_tids(dd, val32);
+
+ /*
+ * write the pbc of each buffer, to be sure it's initialized, then
+ * cancel all the buffers, and also abort any packets that might
+ * have been in flight for some reason (the latter is for driver
+ * unload/reload, but isn't a bad idea at first init). PIO send
+ * isn't enabled at this point, so there is no danger of sending
+ * these out on the wire.
+ * Chip Errata bug 6610
+ */
+ piobuf = (u32 __iomem *) (((char __iomem *)(dd->ipath_kregbase)) +
+ dd->ipath_piobufbase);
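+ /* successive 2k PIO buffers are ipath_palign bytes apart;
+ * piobuf is a u32 pointer, so step in u32 units */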
+ pioincr = dd->ipath_palign / sizeof(*piobuf);
+ for (i = 0; i < dd->ipath_piobcnt2k; i++) {
+ /*
+ * reasonable word count, just to init pbc
+ */
+ writel(16, piobuf);
+ piobuf += pioincr;
+ }
+ /*
+ * self-clearing
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ INFINIPATH_S_ABORT);
+ return 0;
+}
+
+/**
+ * ipath_ht_get_base_info - set chip-specific flags for user code
+ * @pd: the infinipath port
+ * @kbase: ipath_base_info pointer
+ *
+ * We set the HT flag because user packet algorithms can care whether
+ * the underlying link is HyperTransport or lower-bandwidth PCIe.
+ */
+static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase)
+{
+ struct ipath_base_info *kinfo = kbase;
+
+ kinfo->spi_runtime_flags |= IPATH_RUNTIME_HT |
+ IPATH_RUNTIME_RCVHDR_COPY;
+
+ return 0;
+}
+
+/**
+ * ipath_init_ht400_funcs - set up the chip-specific function pointers
+ * @dd: the infinipath device
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+void ipath_init_ht400_funcs(struct ipath_devdata *dd)
+{
+ dd->ipath_f_intrsetup = ipath_ht_intconfig;
+ dd->ipath_f_bus = ipath_setup_ht_config;
+ dd->ipath_f_reset = ipath_setup_ht_reset;
+ dd->ipath_f_get_boardname = ipath_ht_boardname;
+ dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
+ dd->ipath_f_early_init = ipath_ht_early_init;
+ dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors;
+ dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes;
+ dd->ipath_f_bringup_serdes = ipath_ht_bringup_serdes;
+ dd->ipath_f_clear_tids = ipath_ht_clear_tids;
+ dd->ipath_f_put_tid = ipath_ht_put_tid;
+ dd->ipath_f_cleanup = ipath_setup_ht_cleanup;
+ dd->ipath_f_setextled = ipath_setup_ht_setextled;
+ dd->ipath_f_get_base_info = ipath_ht_get_base_info;
+
+ /*
+ * initialize chip-specific variables
+ */
+ dd->ipath_f_tidtemplate = ipath_ht_tidtemplate;
+
+ /*
+ * setup the register offsets, since they are different for each
+ * chip
+ */
+ dd->ipath_kregs = &ipath_ht_kregs;
+ dd->ipath_cregs = &ipath_ht_cregs;
+
+ /*
+ * do very early init that is needed before ipath_f_bus is
+ * called
+ */
+ ipath_init_ht_variables();
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
new file mode 100644
index 0000000000000..2823ff9c0c62d
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include "ipath_kernel.h"
+#include "ips_common.h"
+
+/*
+ * min buffers we want to have per port, after driver
+ */
+#define IPATH_MIN_USER_PORT_BUFCNT 8
+
+/*
+ * Number of ports we are configured to use (to allow for more pio
+ * buffers per port, etc.) Zero means use chip value.
+ */
+static ushort ipath_cfgports;
+
+module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO);
+MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
+
+/*
+ * Number of buffers reserved for driver (layered drivers and SMA
+ * send). Reserved at end of buffer list.
+ */
+static ushort ipath_kpiobufs = 32;
+
+static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);
+
+module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_uint,
+ &ipath_kpiobufs, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
+
+/**
+ * create_port0_egr - allocate the eager TID buffers
+ * @dd: the infinipath device
+ *
+ * This code is now quite different for user and kernel, because
+ * the kernel uses skb's, for the accelerated network performance.
+ * This is the kernel (port0) version.
+ *
+ * Allocate the eager TID buffers and program them into infinipath.
+ * We use the network layer alloc_skb() allocator to allocate the
+ * memory, and either use the buffers as is for things like SMA
+ * packets, or pass the buffers up to the ipath layered driver and
+ * thence the network layer, replacing them as we do so (see
+ * ipath_rcv_layer()).
+ */
+static int create_port0_egr(struct ipath_devdata *dd)
+{
+ unsigned e, egrcnt;
+ struct sk_buff **skbs;
+ int ret;
+
+ egrcnt = dd->ipath_rcvegrcnt;
+
+ skbs = vmalloc(sizeof(*dd->ipath_port0_skbs) * egrcnt);
+ if (skbs == NULL) {
+ ipath_dev_err(dd, "allocation error for eager TID "
+ "skb array\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+ for (e = 0; e < egrcnt; e++) {
+ /*
+ * This is a bit tricky in that we allocate extra
+ * space for 2 bytes of the 14 byte ethernet header.
+ * These two bytes are passed in the ipath header so
+ * the rest of the data is word aligned. We allocate
+ * 4 bytes so that the data buffer stays word aligned.
+ * See ipath_kreceive() for more details.
+ */
+ skbs[e] = ipath_alloc_skb(dd, GFP_KERNEL);
+ if (!skbs[e]) {
+ ipath_dev_err(dd, "SKB allocation error for "
+ "eager TID %u\n", e);
+ while (e != 0)
+ dev_kfree_skb(skbs[--e]);
+ ret = -ENOMEM;
+ goto bail;
+ }
+ }
+ /*
+ * After loop above, so we can test non-NULL to see if ready
+ * to use at receive, etc.
+ */
+ dd->ipath_port0_skbs = skbs;
+
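+ /*
+ * now program the physical address of each skb's data buffer
+ * into the chip's port 0 eager TID table
+ */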
+ for (e = 0; e < egrcnt; e++) {
+ unsigned long phys =
+ virt_to_phys(dd->ipath_port0_skbs[e]->data);
+ dd->ipath_f_put_tid(dd, e + (u64 __iomem *)
+ ((char __iomem *) dd->ipath_kregbase +
+ dd->ipath_rcvegrbase), 0, phys);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static int bringup_link(struct ipath_devdata *dd)
+{
+ u64 val, ibc;
+ int ret = 0;
+
+ /* hold IBC in reset */
+ dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
+ dd->ipath_control);
+
+ /*
+ * Note that prior to try 14 or 15 of IB, the credit scaling
+ * wasn't working, because it was swapped for writes with the
+ * 1 bit default linkstate field
+ */
+
+ /* ignore pbc and align word */
+ val = dd->ipath_piosize2k - 2 * sizeof(u32);
+ /*
+ * for ICRC, which we only send in diag test pkt mode, and we
+ * don't need to worry about that for mtu
+ */
+ val += 1;
+ /*
+ * Set the IBC maxpktlength to the size of our pio buffers the
+ * maxpktlength is in words. This is *not* the IB data MTU.
+ */
+ ibc = (val / sizeof(u32)) << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
+ /* in KB */
+ ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;
+ /*
+ * How often flowctrl sent. More or less in usecs; balance against
+ * watermark value, so that in theory senders always get a flow
+ * control update in time to not let the IB link go idle.
+ */
+ ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT;
+ /* max error tolerance */
+ ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
+ /* use "real" buffer space for */
+ ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT;
+ /* IB credit flow control. */
+ ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
+ /* initially come up waiting for TS1, without sending anything. */
+ dd->ipath_ibcctrl = ibc;
+ /*
+ * Want to start out with both LINKCMD and LINKINITCMD in NOP
+ * (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that
+ * to stay a NOP
+ */
+ ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
+ INFINIPATH_IBCC_LINKINITCMD_SHIFT;
+ ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",
+ (unsigned long long) ibc);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);
+
+ /* be sure chip saw it */
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+
+ ret = dd->ipath_f_bringup_serdes(dd);
+
+ if (ret)
+ dev_info(&dd->pcidev->dev, "Could not initialize SerDes, "
+ "not usable\n");
+ else {
+ /* enable IBC */
+ dd->ipath_control |= INFINIPATH_C_LINKENABLE;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
+ dd->ipath_control);
+ }
+
+ return ret;
+}
+
+static int init_chip_first(struct ipath_devdata *dd,
+ struct ipath_portdata **pdp)
+{
+ struct ipath_portdata *pd = NULL;
+ int ret = 0;
+ u64 val;
+
+ /*
+ * skip cfgports stuff because we are not allocating memory,
+ * and we don't want problems if the portcnt changed due to
+ * cfgports. We do still check and report if they differ
+ * (which should be impossible).
+ */
+ dd->ipath_portcnt =
+ ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
+ if (!ipath_cfgports)
+ dd->ipath_cfgports = dd->ipath_portcnt;
+ else if (ipath_cfgports <= dd->ipath_portcnt) {
+ dd->ipath_cfgports = ipath_cfgports;
+ ipath_dbg("Configured to use %u ports out of %u in chip\n",
+ dd->ipath_cfgports, dd->ipath_portcnt);
+ } else {
+ dd->ipath_cfgports = dd->ipath_portcnt;
+ ipath_dbg("Tried to configured to use %u ports; chip "
+ "only supports %u\n", ipath_cfgports,
+ dd->ipath_portcnt);
+ }
+ dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_cfgports,
+ GFP_KERNEL);
+
+ if (!dd->ipath_pd) {
+ ipath_dev_err(dd, "Unable to allocate portdata array, "
+ "failing\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ dd->ipath_lastegrheads = kzalloc(sizeof(*dd->ipath_lastegrheads)
+ * dd->ipath_cfgports,
+ GFP_KERNEL);
+ dd->ipath_lastrcvhdrqtails =
+ kzalloc(sizeof(*dd->ipath_lastrcvhdrqtails)
+ * dd->ipath_cfgports, GFP_KERNEL);
+
+ if (!dd->ipath_lastegrheads || !dd->ipath_lastrcvhdrqtails) {
+ ipath_dev_err(dd, "Unable to allocate head arrays, "
+ "failing\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ dd->ipath_pd[0] = kzalloc(sizeof(*pd), GFP_KERNEL);
+
+ if (!dd->ipath_pd[0]) {
+ ipath_dev_err(dd, "Unable to allocate portdata for port "
+ "0, failing\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+ pd = dd->ipath_pd[0];
+ pd->port_dd = dd;
+ pd->port_port = 0;
+ pd->port_cnt = 1;
+ /* The port 0 pkey table is used by the layer interface. */
+ pd->port_pkeys[0] = IPS_DEFAULT_P_KEY;
+ dd->ipath_rcvtidcnt =
+ ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
+ dd->ipath_rcvtidbase =
+ ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
+ dd->ipath_rcvegrcnt =
+ ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
+ dd->ipath_rcvegrbase =
+ ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
+ dd->ipath_palign =
+ ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
+ dd->ipath_piobufbase =
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase);
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
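+ /* low 32 bits are the 2k PIO buffer size, high 32 bits the 4k size */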
+ dd->ipath_piosize2k = val & ~0U;
+ dd->ipath_piosize4k = val >> 32;
+ dd->ipath_ibmtu = 4096; /* default to largest legal MTU */
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
+ dd->ipath_piobcnt2k = val & ~0U;
+ dd->ipath_piobcnt4k = val >> 32;
+ dd->ipath_pio2kbase =
+ (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
+ (dd->ipath_piobufbase & 0xffffffff));
+ if (dd->ipath_piobcnt4k) {
+ dd->ipath_pio4kbase = (u32 __iomem *)
+ (((char __iomem *) dd->ipath_kregbase) +
+ (dd->ipath_piobufbase >> 32));
+ /*
+ * 4K buffers take 2 pages; we use roundup just to be
+ * paranoid; we calculate it once here, rather than on
+ * every buf allocate
+ */
+ dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k,
+ dd->ipath_palign);
+ ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p "
+ "(%x aligned)\n",
+ dd->ipath_piobcnt2k, dd->ipath_piosize2k,
+ dd->ipath_pio2kbase, dd->ipath_piobcnt4k,
+ dd->ipath_piosize4k, dd->ipath_pio4kbase,
+ dd->ipath_4kalign);
+ }
+ else
+ ipath_dbg("%u 2k piobufs @ %p\n",
+ dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
+
+ spin_lock_init(&dd->ipath_tid_lock);
+
+done:
+ *pdp = pd;
+ return ret;
+}
+
+/**
+ * init_chip_reset - re-initialize after a reset, or enable
+ * @dd: the infinipath device
+ * @pdp: output for port data
+ *
+ * sanity check at least some of the values after reset, and
+ * ensure no receive or transmit (explicitly, in case the reset
+ * failed)
+ */
+static int init_chip_reset(struct ipath_devdata *dd,
+ struct ipath_portdata **pdp)
+{
+ struct ipath_portdata *pd;
+ u32 rtmp;
+
+ *pdp = pd = dd->ipath_pd[0];
+ /* ensure chip does no sends or receives while we re-initialize */
+ dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0);
+
+ rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
+ if (dd->ipath_portcnt != rtmp)
+ dev_info(&dd->pcidev->dev, "portcnt was %u before "
+ "reset, now %u, using original\n",
+ dd->ipath_portcnt, rtmp);
+ rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
+ if (rtmp != dd->ipath_rcvtidcnt)
+ dev_info(&dd->pcidev->dev, "tidcnt was %u before "
+ "reset, now %u, using original\n",
+ dd->ipath_rcvtidcnt, rtmp);
+ rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
+ if (rtmp != dd->ipath_rcvtidbase)
+ dev_info(&dd->pcidev->dev, "tidbase was %u before "
+ "reset, now %u, using original\n",
+ dd->ipath_rcvtidbase, rtmp);
+ rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
+ if (rtmp != dd->ipath_rcvegrcnt)
+ dev_info(&dd->pcidev->dev, "egrcnt was %u before "
+ "reset, now %u, using original\n",
+ dd->ipath_rcvegrcnt, rtmp);
+ rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
+ if (rtmp != dd->ipath_rcvegrbase)
+ dev_info(&dd->pcidev->dev, "egrbase was %u before "
+ "reset, now %u, using original\n",
+ dd->ipath_rcvegrbase, rtmp);
+
+ return 0;
+}
+
+static int init_pioavailregs(struct ipath_devdata *dd)
+{
+ int ret;
+
+ dd->ipath_pioavailregs_dma = dma_alloc_coherent(
+ &dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys,
+ GFP_KERNEL);
+ if (!dd->ipath_pioavailregs_dma) {
+ ipath_dev_err(dd, "failed to allocate PIOavail reg area "
+ "in memory\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * we really want L2 cache aligned, but for current CPUs of
+ * interest, they are the same.
+ */
+ dd->ipath_statusp = (u64 *)
+ ((char *)dd->ipath_pioavailregs_dma +
+ ((2 * L1_CACHE_BYTES +
+ dd->ipath_pioavregs * sizeof(u64)) & ~(L1_CACHE_BYTES - 1)));
+ /* copy the current value now that it's really allocated */
+ *dd->ipath_statusp = dd->_ipath_status;
+ /*
+ * setup buffer to hold freeze msg, accessible to apps,
+ * following statusp
+ */
+ dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1];
+ /* and its length */
+ dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]);
+
+ if (dd->ipath_unit * 64 > (IPATH_PORT0_RCVHDRTAIL_SIZE - 64)) {
+ ipath_dev_err(dd, "unit %u too large for port 0 "
+ "rcvhdrtail buffer size\n", dd->ipath_unit);
+ ret = -ENODEV;
+ }
+ else
+ ret = 0;
+
+ /* so we can get current tail in ipath_kreceive(), per chip */
+ dd->ipath_hdrqtailptr = &ipath_port0_rcvhdrtail[
+ dd->ipath_unit * (64 / sizeof(*ipath_port0_rcvhdrtail))];
+done:
+ return ret;
+}
+
+/**
+ * init_shadow_tids - allocate the shadow TID array
+ * @dd: the infinipath device
+ *
+ * allocate the shadow TID array, so we can ipath_munlock previous
+ * entries. It may make more sense to move the pageshadow to the
+ * port data structure, so we only allocate memory for ports actually
+ * in use, since we are now at 8k per port.
+ */
+static void init_shadow_tids(struct ipath_devdata *dd)
+{
+ dd->ipath_pageshadow = (struct page **)
+ vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+ sizeof(struct page *));
+ if (!dd->ipath_pageshadow)
+ ipath_dev_err(dd, "failed to allocate shadow page * "
+ "array, no expected sends!\n");
+ else
+ memset(dd->ipath_pageshadow, 0,
+ dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+ sizeof(struct page *));
+}
+
+static void enable_chip(struct ipath_devdata *dd,
+ struct ipath_portdata *pd, int reinit)
+{
+ u32 val;
+ int i;
+
+ if (!reinit) {
+ init_waitqueue_head(&ipath_sma_state_wait);
+ }
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ dd->ipath_rcvctrl);
+
+ /* Enable PIO send, and update of PIOavail regs to memory. */
+ dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
+ INFINIPATH_S_PIOBUFAVAILUPD;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ dd->ipath_sendctrl);
+
+ /*
+ * enable port 0 receive, and receive interrupt. other ports
+ * done as user opens and inits them.
+ */
+ dd->ipath_rcvctrl = INFINIPATH_R_TAILUPD |
+ (1ULL << INFINIPATH_R_PORTENABLE_SHIFT) |
+ (1ULL << INFINIPATH_R_INTRAVAIL_SHIFT);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ dd->ipath_rcvctrl);
+
+ /*
+ * now ready for use. this should be cleared whenever we
+ * detect a reset, or initiate one.
+ */
+ dd->ipath_flags |= IPATH_INITTED;
+
+ /*
+ * init our shadow copies of head from tail values, and write
+ * head values to match.
+ */
+ val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
+ (void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
+ dd->ipath_port0head = ipath_read_ureg32(dd, ur_rcvhdrtail, 0);
+
+ /* Initialize so we interrupt on next packet received */
+ (void)ipath_write_ureg(dd, ur_rcvhdrhead,
+ dd->ipath_rhdrhead_intr_off |
+ dd->ipath_port0head, 0);
+
+ /*
+ * by now pioavail updates to memory should have occurred, so
+ * copy them into our working/shadow registers; this is in
+ * case something went wrong with abort, but mostly to get the
+ * initial values of the generation bit correct.
+ */
+ for (i = 0; i < dd->ipath_pioavregs; i++) {
+ __le64 val;
+
+ /*
+ * Chip Errata bug 6641; even and odd qwords>3 are swapped.
+ */
+ if (i > 3) {
+ if (i & 1)
+ val = dd->ipath_pioavailregs_dma[i - 1];
+ else
+ val = dd->ipath_pioavailregs_dma[i + 1];
+ }
+ else
+ val = dd->ipath_pioavailregs_dma[i];
+ dd->ipath_pioavailshadow[i] = le64_to_cpu(val);
+ }
+ /* can get counters, stats, etc. */
+ dd->ipath_flags |= IPATH_PRESENT;
+}
+
+static int init_housekeeping(struct ipath_devdata *dd,
+ struct ipath_portdata **pdp, int reinit)
+{
+ char boardn[32];
+ int ret = 0;
+
+ /*
+ * have to clear shadow copies of registers at init that are
+ * not otherwise set here, or all kinds of bizarre things
+ * happen with driver on chip reset
+ */
+ dd->ipath_rcvhdrsize = 0;
+
+ /*
+ * Don't clear ipath_flags as 8bit mode was set before
+ * entering this func. However, we do set the linkstate to
+ * unknown, so we can watch for a transition.
+ */
+ dd->ipath_flags |= IPATH_LINKUNK;
+ dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |
+ IPATH_LINKDOWN | IPATH_LINKINIT);
+
+ ipath_cdbg(VERBOSE, "Try to read spc chip revision\n");
+ dd->ipath_revision =
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
+
+ /*
+ * set up fundamental info we need to use the chip; we assume
+ * if the revision reg and these regs are OK, we don't need to
+ * special case the rest
+ */
+ dd->ipath_sregbase =
+ ipath_read_kreg32(dd, dd->ipath_kregs->kr_sendregbase);
+ dd->ipath_cregbase =
+ ipath_read_kreg32(dd, dd->ipath_kregs->kr_counterregbase);
+ dd->ipath_uregbase =
+ ipath_read_kreg32(dd, dd->ipath_kregs->kr_userregbase);
+ ipath_cdbg(VERBOSE, "ipath_kregbase %p, sendbase %x usrbase %x, "
+ "cntrbase %x\n", dd->ipath_kregbase, dd->ipath_sregbase,
+ dd->ipath_uregbase, dd->ipath_cregbase);
+ if ((dd->ipath_revision & 0xffffffff) == 0xffffffff
+ || (dd->ipath_sregbase & 0xffffffff) == 0xffffffff
+ || (dd->ipath_cregbase & 0xffffffff) == 0xffffffff
+ || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {
+ ipath_dev_err(dd, "Register read failures from chip, "
+ "giving up initialization\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ /* clear the initial reset flag, in case first driver load */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
+ INFINIPATH_E_RESET);
+
+ if (reinit)
+ ret = init_chip_reset(dd, pdp);
+ else
+ ret = init_chip_first(dd, pdp);
+
+ if (ret)
+ goto done;
+
+ ipath_cdbg(VERBOSE, "Revision %llx (PCI %x), %u ports, %u tids, "
+ "%u egrtids\n", (unsigned long long) dd->ipath_revision,
+ dd->ipath_pcirev, dd->ipath_portcnt, dd->ipath_rcvtidcnt,
+ dd->ipath_rcvegrcnt);
+
+ if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
+ INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
+ ipath_dev_err(dd, "Driver only handles version %d, "
+ "chip swversion is %d (%llx), failng\n",
+ IPATH_CHIP_SWVERSION,
+ (int)(dd->ipath_revision >>
+ INFINIPATH_R_SOFTWARE_SHIFT) &
+ INFINIPATH_R_SOFTWARE_MASK,
+ (unsigned long long) dd->ipath_revision);
+ ret = -ENOSYS;
+ goto done;
+ }
+ dd->ipath_majrev = (u8) ((dd->ipath_revision >>
+ INFINIPATH_R_CHIPREVMAJOR_SHIFT) &
+ INFINIPATH_R_CHIPREVMAJOR_MASK);
+ dd->ipath_minrev = (u8) ((dd->ipath_revision >>
+ INFINIPATH_R_CHIPREVMINOR_SHIFT) &
+ INFINIPATH_R_CHIPREVMINOR_MASK);
+ dd->ipath_boardrev = (u8) ((dd->ipath_revision >>
+ INFINIPATH_R_BOARDID_SHIFT) &
+ INFINIPATH_R_BOARDID_MASK);
+
+ ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn);
+
+ snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion),
+ "Driver %u.%u, %s, InfiniPath%u %u.%u, PCI %u, "
+ "SW Compat %u\n",
+ IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn,
+ (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) &
+ INFINIPATH_R_ARCH_MASK,
+ dd->ipath_majrev, dd->ipath_minrev, dd->ipath_pcirev,
+ (unsigned)(dd->ipath_revision >>
+ INFINIPATH_R_SOFTWARE_SHIFT) &
+ INFINIPATH_R_SOFTWARE_MASK);
+
+ ipath_dbg("%s", dd->ipath_boardversion);
+
+done:
+ return ret;
+}
+
+
+/**
+ * ipath_init_chip - do the actual initialization sequence on the chip
+ * @dd: the infinipath device
+ * @reinit: reinitializing, so don't allocate new memory
+ *
+ * Do the actual initialization sequence on the chip. This is done
+ * both from the init routine called from the PCI infrastructure, and
+ * when we reset the chip, or detect that it was reset internally,
+ * or it's administratively re-enabled.
+ *
+ * Memory allocation here and in called routines is only done in
+ * the first case (reinit == 0). We have to be careful, because even
+ * without memory allocation, we need to re-write all the chip registers
+ * TIDs, etc. after the reset or enable has completed.
+ */
+int ipath_init_chip(struct ipath_devdata *dd, int reinit)
+{
+ int ret = 0, i;
+ u32 val32, kpiobufs;
+ u64 val, atmp;
+ struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
+
+ ret = init_housekeeping(dd, &pd, reinit);
+
+ /*
+ * we ignore most issues after reporting them, but have to specially
+ * handle hardware-disabled chips.
+ */
+ if (ret == 2) {
+ /* unique error, known to ipath_init_one */
+ ret = -EPERM;
+ goto done;
+ }
+ if (ret)
+ goto done;
+
+ /*
+ * We could bump this to allow for full rcvegrcnt + rcvtidcnt,
+ * but then it no longer nicely fits power of two, and since
+ * we now use routines that backend onto __get_free_pages, the
+ * rest would be wasted.
+ */
+ dd->ipath_rcvhdrcnt = dd->ipath_rcvegrcnt;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
+ dd->ipath_rcvhdrcnt);
+
+ /*
+ * Set up the shadow copies of the piobufavail registers,
+ * which we compare against the chip registers for now, and
+ * the in memory DMA'ed copies of the registers. This has to
+ * be done early, before we calculate lastport, etc.
+ */
+ val = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
+ /*
+ * calc number of pioavail registers, and save it; we have 2
+ * bits per buffer.
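+ * With 64-bit registers and 2 bits per buffer, each register
+ * covers 32 buffers, so round the buffer count up to a multiple
+ * of 32 before dividing.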
+ */
+ dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2)
+ / (sizeof(u64) * BITS_PER_BYTE / 2);
+ if (!ipath_kpiobufs) /* have to have at least 1, for SMA */
+ kpiobufs = ipath_kpiobufs = 1;
+ else if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) <
+ (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT)) {
+ dev_info(&dd->pcidev->dev, "Too few PIO buffers (%u) "
+ "for %u ports to have %u each!\n",
+ dd->ipath_piobcnt2k + dd->ipath_piobcnt4k,
+ dd->ipath_cfgports, IPATH_MIN_USER_PORT_BUFCNT);
+ kpiobufs = 1; /* reserve just the minimum for SMA/ether */
+ } else
+ kpiobufs = ipath_kpiobufs;
+
+ if (kpiobufs >
+ (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
+ (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT))) {
+ i = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
+ (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT);
+ if (i < 0)
+ i = 0;
+ dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs for "
+ "kernel leaves too few for %d user ports "
+ "(%d each); using %u\n", kpiobufs,
+ dd->ipath_cfgports - 1,
+ IPATH_MIN_USER_PORT_BUFCNT, i);
+ /*
+ * shouldn't change ipath_kpiobufs, because could be
+ * different for different devices...
+ */
+ kpiobufs = i;
+ }
+ dd->ipath_lastport_piobuf =
+ dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - kpiobufs;
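+ /* the user ports (all but kernel port 0) split the
+ * remaining buffers evenly */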
+ dd->ipath_pbufsport = dd->ipath_cfgports > 1
+ ? dd->ipath_lastport_piobuf / (dd->ipath_cfgports - 1)
+ : 0;
+ val32 = dd->ipath_lastport_piobuf -
+ (dd->ipath_pbufsport * (dd->ipath_cfgports - 1));
+ if (val32 > 0) {
+ ipath_dbg("allocating %u pbufs/port leaves %u unused, "
+ "add to kernel\n", dd->ipath_pbufsport, val32);
+ dd->ipath_lastport_piobuf -= val32;
+ ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n",
+ dd->ipath_pbufsport, val32);
+ }
+ dd->ipath_lastpioindex = dd->ipath_lastport_piobuf;
+ ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
+ "each for %u user ports\n", kpiobufs,
+ dd->ipath_piobcnt2k + dd->ipath_piobcnt4k,
+ dd->ipath_pbufsport, dd->ipath_cfgports - 1);
+
+ dd->ipath_f_early_init(dd);
+
+ /* early_init sets rcvhdrentsize and rcvhdrsize, so this must be
+ * done after early_init */
+ dd->ipath_hdrqlast =
+ dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
+ dd->ipath_rcvhdrentsize);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
+ dd->ipath_rcvhdrsize);
+
+ if (!reinit) {
+ ret = init_pioavailregs(dd);
+ init_shadow_tids(dd);
+ if (ret)
+ goto done;
+ }
+
+ (void)ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
+ dd->ipath_pioavailregs_phys);
+ /*
+ * this is to detect s/w errors, which the h/w works around by
+ * ignoring the low 6 bits of address, if it wasn't aligned.
+ */
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpioavailaddr);
+ if (val != dd->ipath_pioavailregs_phys) {
+ ipath_dev_err(dd, "Catastrophic software error, "
+ "SendPIOAvailAddr written as %lx, "
+ "read back as %llx\n",
+ (unsigned long) dd->ipath_pioavailregs_phys,
+ (unsigned long long) val);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ val = ipath_port0_rcvhdrtail_dma + dd->ipath_unit * 64;
+
+ /* verify that the alignment requirement was met */
+ ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
+ 0, val);
+ atmp = ipath_read_kreg64_port(
+ dd, dd->ipath_kregs->kr_rcvhdrtailaddr, 0);
+ if (val != atmp) {
+ ipath_dev_err(dd, "Catastrophic software error, "
+ "RcvHdrTailAddr0 written as %llx, "
+ "read back as %llx from %x\n",
+ (unsigned long long) val,
+ (unsigned long long) atmp,
+ dd->ipath_kregs->kr_rcvhdrtailaddr);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP);
+
+ /*
+ * make sure we are not in freeze, and PIO send enabled, so
+ * writes to pbc happen
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 0ULL);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+ ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ INFINIPATH_S_PIOENABLE);
+
+ /*
+ * before error clears, since we expect serdes pll errors during
+ * this, the first time after reset
+ */
+ if (bringup_link(dd)) {
+ dev_info(&dd->pcidev->dev, "Failed to bringup IB link\n");
+ ret = -ENETDOWN;
+ goto done;
+ }
+
+ /*
+ * clear any "expected" hwerrs from reset and/or initialization
+ * clear any that aren't enabled (at least this once), and then
+ * set the enable mask
+ */
+ dd->ipath_f_init_hwerrors(dd);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+ ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+ dd->ipath_hwerrmask);
+
+ dd->ipath_maskederrs = dd->ipath_ignorederrs;
+ /* clear all */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
+ /* enable errors that are masked, at least this first time. */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
+ ~dd->ipath_maskederrs);
+ /* clear any interrupts up to this point (ints still not enabled) */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
+
+ ipath_stats.sps_lid[dd->ipath_unit] = dd->ipath_lid;
+
+ /*
+ * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing
+ * re-init, the simplest way to handle this is to free
+ * existing, and re-allocate.
+ */
+ if (reinit)
+ ipath_free_pddata(dd, 0, 0);
+ dd->ipath_f_tidtemplate(dd);
+ ret = ipath_create_rcvhdrq(dd, pd);
+ if (!ret)
+ ret = create_port0_egr(dd);
+ if (ret)
+ ipath_dev_err(dd, "failed to allocate port 0 (kernel) "
+ "rcvhdrq and/or egr bufs\n");
+ else
+ enable_chip(dd, pd, reinit);
+
+ /*
+ * cause retrigger of pending interrupts ignored during init,
+ * even if we had errors
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
+
+ if (!dd->ipath_stats_timer_active) {
+ /*
+ * first init, or after an admin disable/enable
+ * set up stats retrieval timer, even if we had errors
+ * in last portion of setup
+ */
+ init_timer(&dd->ipath_stats_timer);
+ dd->ipath_stats_timer.function = ipath_get_faststats;
+ dd->ipath_stats_timer.data = (unsigned long) dd;
+ /* every 5 seconds; */
+ dd->ipath_stats_timer.expires = jiffies + 5 * HZ;
+ /* takes ~16 seconds to overflow at full IB 4x bandwidth */
+ add_timer(&dd->ipath_stats_timer);
+ dd->ipath_stats_timer_active = 1;
+ }
+
+done:
+ if (!ret) {
+ ipath_get_guid(dd);
+ *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
+ if (!dd->ipath_f_intrsetup(dd)) {
+ /* now we can enable all interrupts from the chip */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
+ -1LL);
+ /* force re-interrupt of any pending interrupts. */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear,
+ 0ULL);
+ /* chip is usable; mark it as initialized */
+ *dd->ipath_statusp |= IPATH_STATUS_INITTED;
+ } else
+ ipath_dev_err(dd, "No interrupts enabled, couldn't "
+ "setup interrupt address\n");
+
+ if (dd->ipath_cfgports > ipath_stats.sps_nports)
+ /*
+ * sps_nports is a global, so, we set it to
+ * the highest number of ports of any of the
+ * chips we find; we never decrement it, at
+ * least for now. Since this might have changed
+ * over disable/enable or prior to reset, always
+ * do the check and potentially adjust.
+ */
+ ipath_stats.sps_nports = dd->ipath_cfgports;
+ } else
+ ipath_dbg("Failed (%d) to initialize chip\n", ret);
+
+ /* if ret is non-zero, we probably should do some cleanup
+ here... */
+ return ret;
+}
+
+static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp)
+{
+ struct ipath_devdata *dd;
+ unsigned long flags;
+ unsigned short val;
+ int ret;
+
+ ret = ipath_parse_ushort(str, &val);
+
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+
+ if (ret < 0)
+ goto bail;
+
+ if (val == 0) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
+ if (dd->ipath_kregbase)
+ continue;
+ if (val > (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
+ (dd->ipath_cfgports *
+ IPATH_MIN_USER_PORT_BUFCNT)))
+ {
+ ipath_dev_err(
+ dd,
+ "Allocating %d PIO bufs for kernel leaves "
+ "too few for %d user ports (%d each)\n",
+ val, dd->ipath_cfgports - 1,
+ IPATH_MIN_USER_PORT_BUFCNT);
+ ret = -EINVAL;
+ goto bail;
+ }
+ dd->ipath_lastport_piobuf =
+ dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val;
+ }
+
+ ret = 0;
+bail:
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
new file mode 100644
index 0000000000000..60f5f41080692
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -0,0 +1,841 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+
+#include "ipath_kernel.h"
+#include "ips_common.h"
+#include "ipath_layer.h"
+
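+/* summary mask of receive-side packet errors */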
+#define E_SUM_PKTERRS \
+ (INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
+ INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
+ INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \
+ INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \
+ INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
+ INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)
+
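+/* summary mask of send errors, handled in handle_e_sum_errs() */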
+#define E_SUM_ERRS \
+ (INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
+ INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
+ INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \
+ INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
+ INFINIPATH_E_INVALIDADDR)
+
+static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
+{
+ unsigned long sbuf[4];
+ u64 ignore_this_time = 0;
+ u32 piobcnt;
+
+ /* it's possible that sendbuffererror could be valid */
+ piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
+ /* read these before writing errorclear */
+ sbuf[0] = ipath_read_kreg64(
+ dd, dd->ipath_kregs->kr_sendbuffererror);
+ sbuf[1] = ipath_read_kreg64(
+ dd, dd->ipath_kregs->kr_sendbuffererror + 1);
+ if (piobcnt > 128) {
+ sbuf[2] = ipath_read_kreg64(
+ dd, dd->ipath_kregs->kr_sendbuffererror + 2);
+ sbuf[3] = ipath_read_kreg64(
+ dd, dd->ipath_kregs->kr_sendbuffererror + 3);
+ }
+
+ if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
+ int i;
+
+ ipath_cdbg(PKT, "SendbufErrs %lx %lx ", sbuf[0], sbuf[1]);
+ if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)
+ printk("%lx %lx ", sbuf[2], sbuf[3]);
+ for (i = 0; i < piobcnt; i++) {
+ if (test_bit(i, sbuf)) {
+ u32 __iomem *piobuf;
+ if (i < dd->ipath_piobcnt2k)
+ piobuf = (u32 __iomem *)
+ (dd->ipath_pio2kbase +
+ i * dd->ipath_palign);
+ else
+ piobuf = (u32 __iomem *)
+ (dd->ipath_pio4kbase +
+ (i - dd->ipath_piobcnt2k) *
+ dd->ipath_4kalign);
+
+ ipath_cdbg(PKT,
+ "PIObuf[%u] @%p pbc is %x; ",
+ i, piobuf, readl(piobuf));
+
+ ipath_disarm_piobufs(dd, i, 1);
+ }
+ }
+ if (ipath_debug & __IPATH_PKTDBG)
+ printk("\n");
+ }
+ if ((errs & (INFINIPATH_E_SDROPPEDDATAPKT |
+ INFINIPATH_E_SDROPPEDSMPPKT |
+ INFINIPATH_E_SMINPKTLEN)) &&
+ !(dd->ipath_flags & IPATH_LINKACTIVE)) {
+ /*
+ * This can happen when SMA is trying to bring the link
+ * up, but the IB link changes state at the "wrong" time.
+ * The IB logic then complains that the packet isn't
+ * valid. We don't want to confuse people, so we just
+ * don't print them, except at debug
+ */
+ ipath_dbg("Ignoring pktsend errors %llx, because not "
+ "yet active\n", (unsigned long long) errs);
+ ignore_this_time = INFINIPATH_E_SDROPPEDDATAPKT |
+ INFINIPATH_E_SDROPPEDSMPPKT |
+ INFINIPATH_E_SMINPKTLEN;
+ }
+
+ return ignore_this_time;
+}
+
+/* return the strings for the most common link states */
+static char *ib_linkstate(u32 linkstate)
+{
+ char *ret;
+
+ switch (linkstate) {
+ case IPATH_IBSTATE_INIT:
+ ret = "Init";
+ break;
+ case IPATH_IBSTATE_ARM:
+ ret = "Arm";
+ break;
+ case IPATH_IBSTATE_ACTIVE:
+ ret = "Active";
+ break;
+ default:
+ ret = "Down";
+ }
+
+ return ret;
+}
+
+static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
+ ipath_err_t errs, int noprint)
+{
+ u64 val;
+ u32 ltstate, lstate;
+
+ /*
+ * even if diags are enabled, we want to notice LINKINIT, etc.
+ * We just don't want to change the LED state, or
+ * dd->ipath_kregs->kr_ibcctrl
+ */
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
+ lstate = val & IPATH_IBSTATE_MASK;
+ if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
+ lstate == IPATH_IBSTATE_ACTIVE) {
+ /*
+ * only print at SMA if there is a change, debug if not
+ * (sometimes we want to know that, usually not).
+ */
+ if (lstate == ((unsigned) dd->ipath_lastibcstat
+ & IPATH_IBSTATE_MASK)) {
+ ipath_dbg("Status change intr but no change (%s)\n",
+ ib_linkstate(lstate));
+ }
+ else
+ ipath_cdbg(SMA, "Unit %u link state %s, last "
+ "was %s\n", dd->ipath_unit,
+ ib_linkstate(lstate),
+ ib_linkstate((unsigned)
+ dd->ipath_lastibcstat
+ & IPATH_IBSTATE_MASK));
+ }
+ else {
+ lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
+ if (lstate == IPATH_IBSTATE_INIT ||
+ lstate == IPATH_IBSTATE_ARM ||
+ lstate == IPATH_IBSTATE_ACTIVE)
+ ipath_cdbg(SMA, "Unit %u link state down"
+ " (state 0x%x), from %s\n",
+ dd->ipath_unit,
+ (u32)val & IPATH_IBSTATE_MASK,
+ ib_linkstate(lstate));
+ else
+ ipath_cdbg(VERBOSE, "Unit %u link state changed "
+ "to 0x%x from down (%x)\n",
+ dd->ipath_unit, (u32) val, lstate);
+ }
+ ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
+ INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
+ lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
+ INFINIPATH_IBCS_LINKSTATE_MASK;
+
+ if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
+ ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
+ u32 last_ltstate;
+
+ /*
+ * Ignore cycling back and forth from Polling.Active
+ * to Polling.Quiet while waiting for the other end of
+ * the link to come up. We will cycle back and forth
+ * between them if no cable is plugged in,
+ * the other device is powered off or disabled, etc.
+ */
+ last_ltstate = (dd->ipath_lastibcstat >>
+ INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT)
+ & INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
+ if (last_ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE
+ || last_ltstate ==
+ INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
+ if (dd->ipath_ibpollcnt > 40) {
+ dd->ipath_flags |= IPATH_NOCABLE;
+ *dd->ipath_statusp |=
+ IPATH_STATUS_IB_NOCABLE;
+ } else
+ dd->ipath_ibpollcnt++;
+ goto skip_ibchange;
+ }
+ }
+ dd->ipath_ibpollcnt = 0; /* some state other than 2 or 3 */
+ ipath_stats.sps_iblink++;
+ if (ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
+ dd->ipath_flags |= IPATH_LINKDOWN;
+ dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
+ | IPATH_LINKACTIVE |
+ IPATH_LINKARMED);
+ *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+ if (!noprint) {
+ if (((dd->ipath_lastibcstat >>
+ INFINIPATH_IBCS_LINKSTATE_SHIFT) &
+ INFINIPATH_IBCS_LINKSTATE_MASK)
+ == INFINIPATH_IBCS_L_STATE_ACTIVE)
+ /* if from up to down be more vocal */
+ ipath_cdbg(SMA,
+ "Unit %u link now down (%s)\n",
+ dd->ipath_unit,
+ ipath_ibcstatus_str[ltstate]);
+ else
+ ipath_cdbg(VERBOSE, "Unit %u link is "
+ "down (%s)\n", dd->ipath_unit,
+ ipath_ibcstatus_str[ltstate]);
+ }
+
+ dd->ipath_f_setextled(dd, lstate, ltstate);
+ } else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ACTIVE) {
+ dd->ipath_flags |= IPATH_LINKACTIVE;
+ dd->ipath_flags &=
+ ~(IPATH_LINKUNK | IPATH_LINKINIT | IPATH_LINKDOWN |
+ IPATH_LINKARMED | IPATH_NOCABLE);
+ *dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
+ *dd->ipath_statusp |=
+ IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
+ dd->ipath_f_setextled(dd, lstate, ltstate);
+
+ __ipath_layer_intr(dd, IPATH_LAYER_INT_IF_UP);
+ } else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
+ /*
+ * set INIT and DOWN. Down is checked by most of the other
+ * code, but INIT is useful to know in a few places.
+ */
+ dd->ipath_flags |= IPATH_LINKINIT | IPATH_LINKDOWN;
+ dd->ipath_flags &=
+ ~(IPATH_LINKUNK | IPATH_LINKACTIVE | IPATH_LINKARMED
+ | IPATH_NOCABLE);
+ *dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
+ | IPATH_STATUS_IB_READY);
+ dd->ipath_f_setextled(dd, lstate, ltstate);
+ } else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ARM) {
+ dd->ipath_flags |= IPATH_LINKARMED;
+ dd->ipath_flags &=
+ ~(IPATH_LINKUNK | IPATH_LINKDOWN | IPATH_LINKINIT |
+ IPATH_LINKACTIVE | IPATH_NOCABLE);
+ *dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
+ | IPATH_STATUS_IB_READY);
+ dd->ipath_f_setextled(dd, lstate, ltstate);
+ } else {
+ if (!noprint)
+ ipath_dbg("IBstatuschange unit %u: %s (%x)\n",
+ dd->ipath_unit,
+ ipath_ibcstatus_str[ltstate], ltstate);
+ }
+skip_ibchange:
+ dd->ipath_lastibcstat = val;
+}
+
+static void handle_supp_msgs(struct ipath_devdata *dd,
+ unsigned supp_msgs, char msg[512])
+{
+ /*
+ * Print the message unless it's ibc status change only, which
+ * happens so often we never want to count it.
+ */
+ if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
+ /*
+ * msg is the caller's 512-byte buffer; sizeof on an array
+ * parameter would only yield the pointer size here
+ */
+ ipath_decode_err(msg, 512, dd->ipath_lasterror &
+ ~INFINIPATH_E_IBSTATUSCHANGED);
+ if (dd->ipath_lasterror &
+ ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
+ ipath_dev_err(dd, "Suppressed %u messages for "
+ "fast-repeating errors (%s) (%llx)\n",
+ supp_msgs, msg,
+ (unsigned long long)
+ dd->ipath_lasterror);
+ else {
+ /*
+ * rcvegrfull and rcvhdrqfull are "normal", for some
+ * types of processes (mostly benchmarks) that send
+ * huge numbers of messages, while not processing
+ * them. So only complain about these at debug
+ * level.
+ */
+ ipath_dbg("Suppressed %u messages for %s\n",
+ supp_msgs, msg);
+ }
+ }
+}
+
+static unsigned handle_frequent_errors(struct ipath_devdata *dd,
+ ipath_err_t errs, char msg[512],
+ int *noprint)
+{
+ unsigned long nc;
+ static unsigned long nextmsg_time;
+ static unsigned nmsgs, supp_msgs;
+
+ /*
+ * Throttle back "fast" messages to no more than 10 per 5 seconds.
+ * This isn't perfect, but it's a reasonable heuristic. If we get
+ * more than 10, give a 6x longer delay.
+ */
+ nc = jiffies;
+ if (nmsgs > 10) {
+ if (time_before(nc, nextmsg_time)) {
+ *noprint = 1;
+ if (!supp_msgs++)
+ nextmsg_time = nc + HZ * 3;
+ }
+ else if (supp_msgs) {
+ handle_supp_msgs(dd, supp_msgs, msg);
+ supp_msgs = 0;
+ nmsgs = 0;
+ }
+ }
+ else if (!nmsgs++ || time_after(nc, nextmsg_time))
+ nextmsg_time = nc + HZ / 2;
+
+ return supp_msgs;
+}
+
+static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
+{
+ char msg[512];
+ u64 ignore_this_time = 0;
+ int i;
+ int chkerrpkts = 0, noprint = 0;
+ unsigned supp_msgs;
+
+ supp_msgs = handle_frequent_errors(dd, errs, msg, &noprint);
+
+ /*
+ * don't report errors that are masked (includes those always
+ * ignored)
+ */
+ errs &= ~dd->ipath_maskederrs;
+
+ /* do these first, they are most important */
+ if (errs & INFINIPATH_E_HARDWARE) {
+ /* reuse same msg buf */
+ dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
+ }
+
+ if (!noprint && (errs & ~infinipath_e_bitsextant))
+ ipath_dev_err(dd, "error interrupt with unknown errors "
+ "%llx set\n", (unsigned long long)
+ (errs & ~infinipath_e_bitsextant));
+
+ if (errs & E_SUM_ERRS)
+ ignore_this_time = handle_e_sum_errs(dd, errs);
+
+ if (supp_msgs == 250000) {
+ /*
+ * It's not entirely reasonable to assume that the errors set
+ * in the last clear period are all responsible for the
+ * problem, but the alternative (blaming only the errors in
+ * this particular interrupt) isn't great either
+ */
+ dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
+ ~dd->ipath_maskederrs);
+ ipath_decode_err(msg, sizeof msg,
+ (dd->ipath_maskederrs & ~dd->
+ ipath_ignorederrs));
+
+ if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
+ ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
+ ipath_dev_err(dd, "Disabling error(s) %llx because "
+ "occuring too frequently (%s)\n",
+ (unsigned long long)
+ (dd->ipath_maskederrs &
+ ~dd->ipath_ignorederrs), msg);
+ else {
+ /*
+ * rcvegrfull and rcvhdrqfull are "normal",
+ * for some types of processes (mostly benchmarks)
+ * that send huge numbers of messages, while not
+ * processing them. So only complain about
+ * these at debug level.
+ */
+ ipath_dbg("Disabling frequent queue full errors "
+ "(%s)\n", msg);
+ }
+
+ /*
+ * Re-enable the masked errors after around 3 minutes, in
+ * ipath_get_faststats(). If we have a series of fast
+ * repeating but different errors, the interval will keep
+ * stretching out, but that's OK, as that's pretty
+ * catastrophic.
+ */
+ dd->ipath_unmasktime = jiffies + HZ * 180;
+ }
+
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
+ if (ignore_this_time)
+ errs &= ~ignore_this_time;
+ if (errs & ~dd->ipath_lasterror) {
+ errs &= ~dd->ipath_lasterror;
+ /* never suppress duplicate hwerrors or ibstatuschange */
+ dd->ipath_lasterror |= errs &
+ ~(INFINIPATH_E_HARDWARE |
+ INFINIPATH_E_IBSTATUSCHANGED);
+ }
+ if (!errs)
+ return;
+
+ if (!noprint)
+ /*
+ * the ones we mask off are handled specially below or above
+ */
+ ipath_decode_err(msg, sizeof msg,
+ errs & ~(INFINIPATH_E_IBSTATUSCHANGED |
+ INFINIPATH_E_RRCVEGRFULL |
+ INFINIPATH_E_RRCVHDRFULL |
+ INFINIPATH_E_HARDWARE));
+ else
+ /* so we don't need if (!noprint) at strlcat's below */
+ *msg = 0;
+
+ if (errs & E_SUM_PKTERRS) {
+ ipath_stats.sps_pkterrs++;
+ chkerrpkts = 1;
+ }
+ if (errs & E_SUM_ERRS)
+ ipath_stats.sps_errs++;
+
+ if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
+ ipath_stats.sps_crcerrs++;
+ chkerrpkts = 1;
+ }
+
+ /*
+ * We don't want to print these two as they happen, or we can make
+ * the situation even worse, because it takes so long to print
+ * messages to serial consoles. Kernel ports get printed from
+ * fast_stats, no more than every 5 seconds, user ports get printed
+ * on close
+ */
+ if (errs & INFINIPATH_E_RRCVHDRFULL) {
+ int any;
+ u32 hd, tl;
+ ipath_stats.sps_hdrqfull++;
+ for (any = i = 0; i < dd->ipath_cfgports; i++) {
+ struct ipath_portdata *pd = dd->ipath_pd[i];
+ if (i == 0) {
+ hd = dd->ipath_port0head;
+ tl = (u32) le64_to_cpu(
+ *dd->ipath_hdrqtailptr);
+ } else if (pd && pd->port_cnt &&
+ pd->port_rcvhdrtail_kvaddr) {
+ /*
+ * don't report same point multiple times,
+ * except kernel
+ */
+ tl = (u32) *pd->port_rcvhdrtail_kvaddr;
+ if (tl == dd->ipath_lastrcvhdrqtails[i])
+ continue;
+ hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
+ i);
+ } else
+ continue;
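+ /*
+ * hdrq looks full: head is just past tail, or head has
+ * wrapped to 0 with tail at the last entry
+ */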
+ if (hd == (tl + 1) ||
+ (!hd && tl == dd->ipath_hdrqlast)) {
+ dd->ipath_lastrcvhdrqtails[i] = tl;
+ pd->port_hdrqfull++;
+ if (i == 0)
+ chkerrpkts = 1;
+ }
+ }
+ }
+ if (errs & INFINIPATH_E_RRCVEGRFULL) {
+ /*
+ * since this is of less importance and not likely to
+ * happen without also getting hdrfull, only count
+ * occurrences; don't check each port (or even the kernel
+ * vs user)
+ */
+ ipath_stats.sps_etidfull++;
+ if (dd->ipath_port0head !=
+ (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
+ chkerrpkts = 1;
+ }
+
+ /*
+ * do this before IBSTATUSCHANGED, in case both bits set in a single
+ * interrupt; we want the STATUSCHANGE to "win", so we do our
+ * internal copy of state machine correctly
+ */
+ if (errs & INFINIPATH_E_RIBLOSTLINK) {
+ /*
+ * force through block below
+ */
+ errs |= INFINIPATH_E_IBSTATUSCHANGED;
+ ipath_stats.sps_iblink++;
+ dd->ipath_flags |= IPATH_LINKDOWN;
+ dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
+ | IPATH_LINKARMED | IPATH_LINKACTIVE);
+ *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+ if (!noprint) {
+ u64 st = ipath_read_kreg64(
+ dd, dd->ipath_kregs->kr_ibcstatus);
+
+ ipath_dbg("Lost link, link now down (%s)\n",
+ ipath_ibcstatus_str[st & 0xf]);
+ }
+ }
+ if (errs & INFINIPATH_E_IBSTATUSCHANGED)
+ handle_e_ibstatuschanged(dd, errs, noprint);
+
+ if (errs & INFINIPATH_E_RESET) {
+ if (!noprint)
+ ipath_dev_err(dd, "Got reset, requires re-init "
+ "(unload and reload driver)\n");
+ dd->ipath_flags &= ~IPATH_INITTED; /* needs re-init */
+ /* mark as having had error */
+ *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
+ *dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
+ }
+
+ if (!noprint && *msg)
+ ipath_dev_err(dd, "%s error\n", msg);
+ if (dd->ipath_sma_state_wanted & dd->ipath_flags) {
+ ipath_cdbg(VERBOSE, "sma wanted state %x, iflags now %x, "
+ "waking\n", dd->ipath_sma_state_wanted,
+ dd->ipath_flags);
+ wake_up_interruptible(&ipath_sma_state_wait);
+ }
+
+ if (chkerrpkts)
+ /* process possible error packets in hdrq */
+ ipath_kreceive(dd);
+}
+
+/* this is separate to allow for better optimization of ipath_intr() */
+
+static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
+{
+ /*
+ * sometimes happen during driver init and unload, don't want
+ * to process any interrupts at that point
+ */
+
+ /* this is just a bandaid, not a fix, if something goes badly
+ * wrong */
+ if (++*unexpectp > 100) {
+ if (++*unexpectp > 105) {
+ /*
+ * ok, we must be taking somebody else's interrupts,
+ * due to a messed up mptable and/or PIRQ table, so
+ * unregister the interrupt. We've seen this during
+ * linuxbios development work, and it may happen in
+ * the future again.
+ */
+ if (dd->pcidev && dd->pcidev->irq) {
+ ipath_dev_err(dd, "Now %u unexpected "
+ "interrupts, unregistering "
+ "interrupt handler\n",
+ *unexpectp);
+ ipath_dbg("free_irq of irq %x\n",
+ dd->pcidev->irq);
+ free_irq(dd->pcidev->irq, dd);
+ }
+ }
+ if (ipath_read_kreg32(dd, dd->ipath_kregs->kr_intmask)) {
+ ipath_dev_err(dd, "%u unexpected interrupts, "
+ "disabling interrupts completely\n",
+ *unexpectp);
+ /*
+ * disable all interrupts, something is very wrong
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
+ 0ULL);
+ }
+ } else if (*unexpectp > 1)
+ ipath_dbg("Interrupt when not ready, should not happen, "
+ "ignoring\n");
+}
+
+static void ipath_bad_regread(struct ipath_devdata *dd)
+{
+ static int allbits;
+
+ /* separate routine, for better optimization of ipath_intr() */
+
+ /*
+ * We print the message and disable interrupts, in hope of
+ * having a better chance of debugging the problem.
+ */
+ ipath_dev_err(dd,
+ "Read of interrupt status failed (all bits set)\n");
+ if (allbits++) {
+ /* disable all interrupts, something is very wrong */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
+ if (allbits == 2) {
+ ipath_dev_err(dd, "Still bad interrupt status, "
+ "unregistering interrupt\n");
+ free_irq(dd->pcidev->irq, dd);
+ } else if (allbits > 2) {
+ if ((allbits % 10000) == 0)
+ printk(".");
+ } else
+ ipath_dev_err(dd, "Disabling interrupts, "
+ "multiple errors\n");
+ }
+}
+
+static void handle_port_pioavail(struct ipath_devdata *dd)
+{
+ u32 i;
+ /*
+ * start from port 1, since for now port 0 is never using
+ * wait_event for PIO
+ */
+ for (i = 1; dd->ipath_portpiowait && i < dd->ipath_cfgports; i++) {
+ struct ipath_portdata *pd = dd->ipath_pd[i];
+
+ if (pd && pd->port_cnt &&
+ dd->ipath_portpiowait & (1U << i)) {
+ clear_bit(i, &dd->ipath_portpiowait);
+ if (test_bit(IPATH_PORT_WAITING_PIO,
+ &pd->port_flag)) {
+ clear_bit(IPATH_PORT_WAITING_PIO,
+ &pd->port_flag);
+ wake_up_interruptible(&pd->port_wait);
+ }
+ }
+ }
+}
+
+static void handle_layer_pioavail(struct ipath_devdata *dd)
+{
+ int ret;
+
+ ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
+ if (ret > 0)
+ goto clear;
+
+ ret = __ipath_verbs_piobufavail(dd);
+ if (ret > 0)
+ goto clear;
+
+ return;
+clear:
+ set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ dd->ipath_sendctrl);
+}
+
+static void handle_rcv(struct ipath_devdata *dd, u32 istat)
+{
+ u64 portr;
+ int i;
+ int rcvdint = 0;
+
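+ /* build a per-port bitmask of ports with packets available
+ * or urgent packets pending */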
+ portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
+ infinipath_i_rcvavail_mask)
+ | ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
+ infinipath_i_rcvurg_mask);
+ for (i = 0; i < dd->ipath_cfgports; i++) {
+ struct ipath_portdata *pd = dd->ipath_pd[i];
+ if (portr & (1 << i) && pd &&
+ pd->port_cnt) {
+ if (i == 0)
+ ipath_kreceive(dd);
+ else if (test_bit(IPATH_PORT_WAITING_RCV,
+ &pd->port_flag)) {
+ int rcbit;
+ clear_bit(IPATH_PORT_WAITING_RCV,
+ &pd->port_flag);
+ rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
+ clear_bit(rcbit, &dd->ipath_rcvctrl);
+ wake_up_interruptible(&pd->port_wait);
+ rcvdint = 1;
+ }
+ }
+ }
+ if (rcvdint) {
+ /* only want to take one interrupt, so turn off the rcv
+ * interrupt for all the ports that we did the wakeup on
+ * (but never for kernel port)
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ dd->ipath_rcvctrl);
+ }
+}
+
+irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
+{
+ struct ipath_devdata *dd = data;
+ u32 istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
+ ipath_err_t estat = 0;
+ static unsigned unexpected = 0;
+ irqreturn_t ret;
+
+ if (unlikely(!istat)) {
+ ipath_stats.sps_nullintr++;
+ ret = IRQ_NONE; /* not our interrupt, or already handled */
+ goto bail;
+ }
+ if (unlikely(istat == -1)) {
+ ipath_bad_regread(dd);
+ /* don't know if it was our interrupt or not */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ ipath_stats.sps_ints++;
+
+ /*
+ * this needs to be flags&initted, not statusp, so we keep
+ * taking interrupts even after link goes down, etc.
+ * Also, we *must* clear the interrupt at some point, or we won't
+ * take it again, which can be real bad for errors, etc...
+ */
+
+ if (!(dd->ipath_flags & IPATH_INITTED)) {
+ ipath_bad_intr(dd, &unexpected);
+ ret = IRQ_NONE;
+ goto bail;
+ }
+ if (unexpected)
+ unexpected = 0;
+
+ ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
+
+ if (istat & ~infinipath_i_bitsextant)
+ ipath_dev_err(dd,
+ "interrupt with unknown interrupts %x set\n",
+ istat & (u32) ~ infinipath_i_bitsextant);
+
+ if (istat & INFINIPATH_I_ERROR) {
+ ipath_stats.sps_errints++;
+ estat = ipath_read_kreg64(dd,
+ dd->ipath_kregs->kr_errorstatus);
+ if (!estat)
+ dev_info(&dd->pcidev->dev, "error interrupt (%x), "
+ "but no error bits set!\n", istat);
+ else if (estat == -1LL)
+ /*
+ * should we try clearing all, or hope next read
+ * works?
+ */
+ ipath_dev_err(dd, "Read of error status failed "
+ "(all bits set); ignoring\n");
+ else
+ handle_errors(dd, estat);
+ }
+
+ if (istat & INFINIPATH_I_GPIO) {
+ if (unlikely(!(dd->ipath_flags & IPATH_GPIO_INTR))) {
+ u32 gpiostatus;
+ gpiostatus = ipath_read_kreg32(
+ dd, dd->ipath_kregs->kr_gpio_status);
+ ipath_dbg("Unexpected GPIO interrupt bits %x\n",
+ gpiostatus);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
+ gpiostatus);
+ }
+ else {
+ /* Clear GPIO status bit 2 */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
+ (u64) (1 << 2));
+
+ /*
+ * Packets are available in the port 0 rcv queue.
+ * Eventually this needs to be generalized to check
+ * IPATH_GPIO_INTR, and the specific GPIO bit, if
+ * GPIO interrupts are used for anything else.
+ */
+ ipath_kreceive(dd);
+ }
+ }
+
+ /*
+ * clear the ones we will deal with on this round
+ * We clear it early, mostly for receive interrupts, so we
+ * know the chip will have seen this by the time we process
+ * the queue, and will re-interrupt if necessary. The processor
+ * itself won't take the interrupt again until we return.
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
+
+ if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
+ clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ dd->ipath_sendctrl);
+
+ if (dd->ipath_portpiowait)
+ handle_port_pioavail(dd);
+
+ handle_layer_pioavail(dd);
+ }
+
+ /*
+ * we check for both transition from empty to non-empty, and urgent
+ * packets (those with the interrupt bit set in the header)
+ */
+
+ if (istat & ((infinipath_i_rcvavail_mask <<
+ INFINIPATH_I_RCVAVAIL_SHIFT)
+ | (infinipath_i_rcvurg_mask <<
+ INFINIPATH_I_RCVURG_SHIFT)))
+ handle_rcv(dd, istat);
+
+ ret = IRQ_HANDLED;
+
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
new file mode 100644
index 0000000000000..159d0aed31a5b
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -0,0 +1,884 @@
+#ifndef _IPATH_KERNEL_H
+#define _IPATH_KERNEL_H
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This header file is the base header file for infinipath kernel code
+ * ipath_user.h serves a similar purpose for user code.
+ */
+
+#include <linux/interrupt.h>
+#include <asm/io.h>
+
+#include "ipath_common.h"
+#include "ipath_debug.h"
+#include "ipath_registers.h"
+
+/* only s/w major version of InfiniPath we can handle */
+#define IPATH_CHIP_VERS_MAJ 2U
+
+/* don't care about this except printing */
+#define IPATH_CHIP_VERS_MIN 0U
+
+/* temporary, maybe always */
+extern struct infinipath_stats ipath_stats;
+
+#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
+
+struct ipath_portdata {
+ void **port_rcvegrbuf;
+ dma_addr_t *port_rcvegrbuf_phys;
+ /* rcvhdrq base, needs mmap before useful */
+ void *port_rcvhdrq;
+ /* kernel virtual address where hdrqtail is updated */
+ u64 *port_rcvhdrtail_kvaddr;
+ /* page * used for uaddr */
+ struct page *port_rcvhdrtail_pagep;
+ /*
+ * temp buffer for expected send setup, allocated at open, instead
+ * of each setup call
+ */
+ void *port_tid_pg_list;
+ /* when waiting for rcv or pioavail */
+ wait_queue_head_t port_wait;
+ /*
+ * rcvegr bufs base, physical; must fit in 44 bits so that
+ * 32 bit programs can mmap64 it (44 bit addressing works)
+ */
+ dma_addr_t port_rcvegr_phys;
+ /* mmap of hdrq, must fit in 44 bits */
+ dma_addr_t port_rcvhdrq_phys;
+ /*
+ * the actual user address that we ipath_mlock'ed, so we can
+ * ipath_munlock it at close
+ */
+ unsigned long port_rcvhdrtail_uaddr;
+ /*
+ * number of opens on this instance (0 or 1; ignoring forks, dup,
+ * etc. for now)
+ */
+ int port_cnt;
+ /*
+ * how much space to leave at start of eager TID entries for
+ * protocol use, on each TID
+ */
+ /* this port's number, kept here instead of calculating it */
+ unsigned port_port;
+ /* chip offset of PIO buffers for this port */
+ u32 port_piobufs;
+ /* how many alloc_pages() chunks in port_rcvegrbuf_pages */
+ u32 port_rcvegrbuf_chunks;
+ /* how many egrbufs per chunk */
+ u32 port_rcvegrbufs_perchunk;
+ /* order for port_rcvegrbuf_pages */
+ size_t port_rcvegrbuf_size;
+ /* rcvhdrq size (for freeing) */
+ size_t port_rcvhdrq_size;
+ /* next expected TID to check when looking for free */
+ u32 port_tidcursor;
+ /* port flag bits (IPATH_PORT_WAITING_*, defined below) */
+ unsigned long port_flag;
+ /* WAIT_RCV that timed out, no interrupt */
+ u32 port_rcvwait_to;
+ /* WAIT_PIO that timed out, no interrupt */
+ u32 port_piowait_to;
+ /* WAIT_RCV already happened, no wait */
+ u32 port_rcvnowait;
+ /* WAIT_PIO already happened, no wait */
+ u32 port_pionowait;
+ /* total number of rcvhdrqfull errors */
+ u32 port_hdrqfull;
+ /* pid of process using this port */
+ pid_t port_pid;
+ /* same size as task_struct .comm[] */
+ char port_comm[16];
+ /* pkeys set by this use of this port */
+ u16 port_pkeys[4];
+ /* so file ops can get at unit */
+ struct ipath_devdata *port_dd;
+};
+
+struct sk_buff;
+
+/*
+ * control information for layered drivers
+ */
+struct _ipath_layer {
+ void *l_arg;
+};
+
+/* Verbs layer interface */
+struct _verbs_layer {
+ void *l_arg;
+ struct timer_list l_timer;
+};
+
+struct ipath_devdata {
+ struct list_head ipath_list;
+
+ struct ipath_kregs const *ipath_kregs;
+ struct ipath_cregs const *ipath_cregs;
+
+ /* mem-mapped pointer to base of chip regs */
+ u64 __iomem *ipath_kregbase;
+ /* end of mem-mapped chip space; range checking */
+ u64 __iomem *ipath_kregend;
+ /* physical address of chip for io_remap, etc. */
+ unsigned long ipath_physaddr;
+ /* base of memory alloced for ipath_kregbase, for free */
+ u64 *ipath_kregalloc;
+ /*
+ * version of kregbase that doesn't have high bits set (for 32 bit
+ * programs, so mmap64 44 bit works)
+ */
+ u64 __iomem *ipath_kregvirt;
+ /*
+ * virtual address where port0 rcvhdrqtail updated for this unit.
+ * only written to by the chip, not the driver.
+ */
+ volatile __le64 *ipath_hdrqtailptr;
+ dma_addr_t ipath_dma_addr;
+ /* ipath_cfgports pointers */
+ struct ipath_portdata **ipath_pd;
+ /* sk_buffs used by port 0 eager receive queue */
+ struct sk_buff **ipath_port0_skbs;
+ /* kvirt address of 1st 2k pio buffer */
+ void __iomem *ipath_pio2kbase;
+ /* kvirt address of 1st 4k pio buffer */
+ void __iomem *ipath_pio4kbase;
+ /*
+ * points to area where PIOavail registers will be DMA'ed.
+ * Has to be on a page of its own, because the page will be
+ * mapped into user program space. This copy is *ONLY* ever
+ * written by DMA, not by the driver! Need a copy per device
+ * when we get to multiple devices
+ */
+ volatile __le64 *ipath_pioavailregs_dma;
+ /* physical address where updates occur */
+ dma_addr_t ipath_pioavailregs_phys;
+ struct _ipath_layer ipath_layer;
+ /* setup intr */
+ int (*ipath_f_intrsetup)(struct ipath_devdata *);
+ /* setup on-chip bus config */
+ int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
+ /* hard reset chip */
+ int (*ipath_f_reset)(struct ipath_devdata *);
+ int (*ipath_f_get_boardname)(struct ipath_devdata *, char *,
+ size_t);
+ void (*ipath_f_init_hwerrors)(struct ipath_devdata *);
+ void (*ipath_f_handle_hwerrors)(struct ipath_devdata *, char *,
+ size_t);
+ void (*ipath_f_quiet_serdes)(struct ipath_devdata *);
+ int (*ipath_f_bringup_serdes)(struct ipath_devdata *);
+ int (*ipath_f_early_init)(struct ipath_devdata *);
+ void (*ipath_f_clear_tids)(struct ipath_devdata *, unsigned);
+ void (*ipath_f_put_tid)(struct ipath_devdata *, u64 __iomem*,
+ u32, unsigned long);
+ void (*ipath_f_tidtemplate)(struct ipath_devdata *);
+ void (*ipath_f_cleanup)(struct ipath_devdata *);
+ void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
+ /* fill out chip-specific fields */
+ int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
+ struct _verbs_layer verbs_layer;
+ /* total dwords sent (summed from counter) */
+ u64 ipath_sword;
+ /* total dwords rcvd (summed from counter) */
+ u64 ipath_rword;
+ /* total packets sent (summed from counter) */
+ u64 ipath_spkts;
+ /* total packets rcvd (summed from counter) */
+ u64 ipath_rpkts;
+ /* ipath_statusp initially points to this. */
+ u64 _ipath_status;
+ /* GUID for this interface, in network order */
+ __be64 ipath_guid;
+ /*
+ * aggregate of error bits reported since last cleared, for
+ * limiting of error reporting
+ */
+ ipath_err_t ipath_lasterror;
+ /*
+ * aggregate of error bits reported since last cleared, for
+ * limiting of hwerror reporting
+ */
+ ipath_err_t ipath_lasthwerror;
+ /*
+ * errors masked because they occur too fast, also includes errors
+ * that are always ignored (ipath_ignorederrs)
+ */
+ ipath_err_t ipath_maskederrs;
+ /* time in jiffies at which to re-enable maskederrs */
+ unsigned long ipath_unmasktime;
+ /*
+ * errors always ignored (masked), at least for a given
+ * chip/device, because they are wrong or not useful
+ */
+ ipath_err_t ipath_ignorederrs;
+ /* count of egrfull errors, combined for all ports */
+ u64 ipath_last_tidfull;
+ /* for ipath_qcheck() */
+ u64 ipath_lastport0rcv_cnt;
+ /* template for writing TIDs */
+ u64 ipath_tidtemplate;
+ /* value to write to free TIDs */
+ u64 ipath_tidinvalid;
+ /* PE-800 rcv interrupt setup */
+ u64 ipath_rhdrhead_intr_off;
+
+ /* size of memory at ipath_kregbase */
+ u32 ipath_kregsize;
+ /* number of registers used for pioavail */
+ u32 ipath_pioavregs;
+ /* IPATH_POLL, etc. */
+ u32 ipath_flags;
+ /* ipath_flags sma is waiting for */
+ u32 ipath_sma_state_wanted;
+ /* last buffer for user use; the first buffer for kernel use is
+ * at this index. */
+ u32 ipath_lastport_piobuf;
+ /* is a stats timer active */
+ u32 ipath_stats_timer_active;
+ /* dwords sent read from counter */
+ u32 ipath_lastsword;
+ /* dwords received read from counter */
+ u32 ipath_lastrword;
+ /* sent packets read from counter */
+ u32 ipath_lastspkts;
+ /* received packets read from counter */
+ u32 ipath_lastrpkts;
+ /* pio bufs allocated per port */
+ u32 ipath_pbufsport;
+ /*
+ * number of ports configured as max; zero is set to number chip
+ * supports, less gives more pio bufs/port, etc.
+ */
+ u32 ipath_cfgports;
+ /* port0 rcvhdrq head offset */
+ u32 ipath_port0head;
+ /* count of port 0 hdrqfull errors */
+ u32 ipath_p0_hdrqfull;
+
+ /*
+ * (*cfgports) used to suppress multiple instances of same
+ * port staying stuck at same point
+ */
+ u32 *ipath_lastrcvhdrqtails;
+ /*
+ * (*cfgports) used to suppress multiple instances of same
+ * port staying stuck at same point
+ */
+ u32 *ipath_lastegrheads;
+ /*
+ * index of last piobuffer we used. Speeds up searching, by
+ * starting at this point. Doesn't matter if multiple CPUs use and
+ * update it; the last updater's write is the only one that matters.
+ * Whenever it wraps, we update the shadow copies. Need a copy per
+ * device when we get to multiple devices
+ */
+ u32 ipath_lastpioindex;
+ /* max length of freezemsg */
+ u32 ipath_freezelen;
+ /*
+ * consecutive times we wanted a PIO buffer but were unable to
+ * get one
+ */
+ u32 ipath_consec_nopiobuf;
+ /*
+ * hint that we should update ipath_pioavailshadow before
+ * looking for a PIO buffer
+ */
+ u32 ipath_upd_pio_shadow;
+ /* so we can rewrite it after a chip reset */
+ u32 ipath_pcibar0;
+ /* so we can rewrite it after a chip reset */
+ u32 ipath_pcibar1;
+ /* sequential tries for SMA send and no bufs */
+ u32 ipath_nosma_bufs;
+ /* duration (seconds) ipath_nosma_bufs set */
+ u32 ipath_nosma_secs;
+
+ /* HT/PCI Vendor ID (here for NodeInfo) */
+ u16 ipath_vendorid;
+ /* HT/PCI Device ID (here for NodeInfo) */
+ u16 ipath_deviceid;
+ /* offset in HT config space of slave/primary interface block */
+ u8 ipath_ht_slave_off;
+ /* for write combining settings */
+ unsigned long ipath_wc_cookie;
+ /* ref count for each pkey */
+ atomic_t ipath_pkeyrefs[4];
+ /* shadow copy of all exptids physaddr; used only by funcsim */
+ u64 *ipath_tidsimshadow;
+ /* shadow copy of struct page *'s for exp tid pages */
+ struct page **ipath_pageshadow;
+ /* lock to workaround chip bug 9437 */
+ spinlock_t ipath_tid_lock;
+
+ /*
+ * IPATH_STATUS_*,
+ * this address is mapped readonly into user processes so they can
+ * get status cheaply, whenever they want.
+ */
+ u64 *ipath_statusp;
+ /* freeze msg if hw error put chip in freeze */
+ char *ipath_freezemsg;
+ /* pci access data structure */
+ struct pci_dev *pcidev;
+ struct cdev *cdev;
+ struct class_device *class_dev;
+ /* timer used to prevent stats overflow, error throttling, etc. */
+ struct timer_list ipath_stats_timer;
+ /* check for stale messages in rcv queue */
+ /* only allow one intr at a time. */
+ unsigned long ipath_rcv_pending;
+
+ /*
+ * Shadow copies of registers; size indicates read access size.
+ * Most of them are readonly, but some are write-only registers,
+ * where we manipulate the bits in the shadow copy, and then write
+ * the shadow copy to infinipath (see the sketch after this struct).
+ *
+ * We deliberately make most of these 32 bits, since they have
+ * restricted range. For any that we read, we want to generate 32
+ * bit accesses, since Opteron will generate 2 separate 32 bit HT
+ * transactions for a 64 bit read, and we want to avoid unnecessary
+ * HT transactions.
+ */
+
+ /* This is the 64 bit group */
+
+ /*
+ * shadow of pioavail, check to be sure it's large enough at
+ * init time.
+ */
+ unsigned long ipath_pioavailshadow[8];
+ /* shadow of kr_gpio_out, for rmw ops */
+ u64 ipath_gpio_out;
+ /* kr_revision shadow */
+ u64 ipath_revision;
+ /*
+ * shadow of ibcctrl, for interrupt handling of link changes,
+ * etc.
+ */
+ u64 ipath_ibcctrl;
+ /*
+ * last ibcstatus, to suppress "duplicate" status change messages,
+ * mostly from 2 to 3
+ */
+ u64 ipath_lastibcstat;
+ /* hwerrmask shadow */
+ ipath_err_t ipath_hwerrmask;
+ /* interrupt config reg shadow */
+ u64 ipath_intconfig;
+ /* kr_sendpiobufbase value */
+ u64 ipath_piobufbase;
+
+ /* these are the "32 bit" regs */
+
+ /*
+ * number of GUIDs in the flash for this interface; may need some
+ * rethinking for setting on other ifaces
+ */
+ u32 ipath_nguid;
+ /*
+ * the following two are 32-bit bitmasks, but {test,clear,set}_bit
+ * all expect bit fields to be "unsigned long"
+ */
+ /* shadow kr_rcvctrl */
+ unsigned long ipath_rcvctrl;
+ /* shadow kr_sendctrl */
+ unsigned long ipath_sendctrl;
+
+ /* value we put in kr_rcvhdrcnt */
+ u32 ipath_rcvhdrcnt;
+ /* value we put in kr_rcvhdrsize */
+ u32 ipath_rcvhdrsize;
+ /* value we put in kr_rcvhdrentsize */
+ u32 ipath_rcvhdrentsize;
+ /* offset of last entry in rcvhdrq */
+ u32 ipath_hdrqlast;
+ /* kr_portcnt value */
+ u32 ipath_portcnt;
+ /* kr_pagealign value */
+ u32 ipath_palign;
+ /* number of "2KB" PIO buffers */
+ u32 ipath_piobcnt2k;
+ /* size in bytes of "2KB" PIO buffers */
+ u32 ipath_piosize2k;
+ /* number of "4KB" PIO buffers */
+ u32 ipath_piobcnt4k;
+ /* size in bytes of "4KB" PIO buffers */
+ u32 ipath_piosize4k;
+ /* kr_rcvegrbase value */
+ u32 ipath_rcvegrbase;
+ /* kr_rcvegrcnt value */
+ u32 ipath_rcvegrcnt;
+ /* kr_rcvtidbase value */
+ u32 ipath_rcvtidbase;
+ /* kr_rcvtidcnt value */
+ u32 ipath_rcvtidcnt;
+ /* kr_sendregbase */
+ u32 ipath_sregbase;
+ /* kr_userregbase */
+ u32 ipath_uregbase;
+ /* kr_counterregbase */
+ u32 ipath_cregbase;
+ /* shadow the control register contents */
+ u32 ipath_control;
+ /* shadow the gpio output contents */
+ u32 ipath_extctrl;
+ /* PCI revision register (HTC rev on FPGA) */
+ u32 ipath_pcirev;
+
+ /* chip address space used by 4k pio buffers */
+ u32 ipath_4kalign;
+ /* The MTU programmed for this unit */
+ u32 ipath_ibmtu;
+ /*
+ * The max size IB packet, including IB headers, that we can send.
+ * Starts same as ipath_piosize, but is affected when ibmtu is
+ * changed, or by size of eager buffers
+ */
+ u32 ipath_ibmaxlen;
+ /*
+ * ibmaxlen at init time, limited by chip and by receive buffer
+ * size. Not changed after init.
+ */
+ u32 ipath_init_ibmaxlen;
+ /* size of each rcvegrbuffer */
+ u32 ipath_rcvegrbufsize;
+ /* width (2,4,8,16,32) from HT config reg */
+ u32 ipath_htwidth;
+ /* HT speed (200,400,800,1000) from HT config */
+ u32 ipath_htspeed;
+ /* ports waiting for PIOavail intr */
+ unsigned long ipath_portpiowait;
+ /*
+ * number of sequential ibcstatus changes for polling active/quiet
+ * (i.e., link not coming up).
+ */
+ u32 ipath_ibpollcnt;
+ /* low and high portions of MSI capability/vector */
+ u32 ipath_msi_lo;
+ /* saved after PCIe init for restore after reset */
+ u32 ipath_msi_hi;
+ /* MSI data (vector) saved for restore */
+ u16 ipath_msi_data;
+ /* MLID programmed for this instance */
+ u16 ipath_mlid;
+ /* LID programmed for this instance */
+ u16 ipath_lid;
+ /* list of pkeys programmed; 0 if not set */
+ u16 ipath_pkeys[4];
+ /* ASCII serial number, from flash */
+ u8 ipath_serial[12];
+ /* human readable board version */
+ u8 ipath_boardversion[80];
+ /* chip major rev, from ipath_revision */
+ u8 ipath_majrev;
+ /* chip minor rev, from ipath_revision */
+ u8 ipath_minrev;
+ /* board rev, from ipath_revision */
+ u8 ipath_boardrev;
+ /* unit # of this chip, if present */
+ int ipath_unit;
+ /* saved for restore after reset */
+ u8 ipath_pci_cacheline;
+ /* LID mask control */
+ u8 ipath_lmc;
+};
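
The comment on the register shadows above describes a read-modify-write idiom: flip bits in the software shadow, then write the whole shadow to the chip. A minimal sketch of that idiom, modeled on the SPIOBUFAVAIL handling in ipath_intr() earlier in this patch (not part of the patch itself; the field and register names are the ones declared above):

/*
 * Sketch only: disable the "PIO buffer available" interrupt by clearing
 * the bit in the software shadow, then writing the shadow back to the chip.
 */
static void example_disable_pioavail_intr(struct ipath_devdata *dd)
{
	clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);
}
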
+
+extern volatile __le64 *ipath_port0_rcvhdrtail;
+extern dma_addr_t ipath_port0_rcvhdrtail_dma;
+
+#define IPATH_PORT0_RCVHDRTAIL_SIZE PAGE_SIZE
+
+extern struct list_head ipath_dev_list;
+extern spinlock_t ipath_devs_lock;
+extern struct ipath_devdata *ipath_lookup(int unit);
+
+extern u16 ipath_layer_rcv_opcode;
+extern int ipath_verbs_registered;
+extern int __ipath_layer_intr(struct ipath_devdata *, u32);
+extern int ipath_layer_intr(struct ipath_devdata *, u32);
+extern int __ipath_layer_rcv(struct ipath_devdata *, void *,
+ struct sk_buff *);
+extern int __ipath_layer_rcv_lid(struct ipath_devdata *, void *);
+extern int __ipath_verbs_piobufavail(struct ipath_devdata *);
+extern int __ipath_verbs_rcv(struct ipath_devdata *, void *, void *, u32);
+
+void ipath_layer_add(struct ipath_devdata *);
+void ipath_layer_del(struct ipath_devdata *);
+
+int ipath_init_chip(struct ipath_devdata *, int);
+int ipath_enable_wc(struct ipath_devdata *dd);
+void ipath_disable_wc(struct ipath_devdata *dd);
+int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp);
+void ipath_shutdown_device(struct ipath_devdata *);
+
+struct file_operations;
+int ipath_cdev_init(int minor, char *name, struct file_operations *fops,
+ struct cdev **cdevp, struct class_device **class_devp);
+void ipath_cdev_cleanup(struct cdev **cdevp,
+ struct class_device **class_devp);
+
+int ipath_diag_init(void);
+void ipath_diag_cleanup(void);
+void ipath_diag_bringup_link(struct ipath_devdata *);
+
+extern wait_queue_head_t ipath_sma_state_wait;
+
+int ipath_user_add(struct ipath_devdata *dd);
+void ipath_user_del(struct ipath_devdata *dd);
+
+struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
+
+extern int ipath_diag_inuse;
+
+irqreturn_t ipath_intr(int irq, void *devid, struct pt_regs *regs);
+void ipath_decode_err(char *buf, size_t blen, ipath_err_t err);
+#if __IPATH_INFO || __IPATH_DBG
+extern const char *ipath_ibcstatus_str[];
+#endif
+
+/* clean up any per-chip chip-specific stuff */
+void ipath_chip_cleanup(struct ipath_devdata *);
+/* clean up any chip type-specific stuff */
+void ipath_chip_done(void);
+
+/* check to see if we have to force ordering for write combining */
+int ipath_unordered_wc(void);
+
+void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
+ unsigned cnt);
+
+int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
+void ipath_free_pddata(struct ipath_devdata *, u32, int);
+
+int ipath_parse_ushort(const char *str, unsigned short *valp);
+
+int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
+void ipath_set_ib_lstate(struct ipath_devdata *, int);
+void ipath_kreceive(struct ipath_devdata *);
+int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
+int ipath_reset_device(int);
+void ipath_get_faststats(unsigned long);
+
+/* for use in system calls, where we want to know device type, etc. */
+#define port_fp(fp) ((struct ipath_portdata *) (fp)->private_data)
+
+/*
+ * values for ipath_flags
+ */
+/* The chip is up and initted */
+#define IPATH_INITTED 0x2
+ /* set if any user code has set kr_rcvhdrsize */
+#define IPATH_RCVHDRSZ_SET 0x4
+ /* The chip is present and valid for accesses */
+#define IPATH_PRESENT 0x8
+ /* HT link0 is only 8 bits wide, ignore upper byte crc
+ * errors, etc. */
+#define IPATH_8BIT_IN_HT0 0x10
+ /* HT link1 is only 8 bits wide, ignore upper byte crc
+ * errors, etc. */
+#define IPATH_8BIT_IN_HT1 0x20
+ /* The link is down */
+#define IPATH_LINKDOWN 0x40
+ /* The link level is up (0x11) */
+#define IPATH_LINKINIT 0x80
+ /* The link is in the armed (0x21) state */
+#define IPATH_LINKARMED 0x100
+ /* The link is in the active (0x31) state */
+#define IPATH_LINKACTIVE 0x200
+ /* link current state is unknown */
+#define IPATH_LINKUNK 0x400
+ /* no IB cable, or no device on IB cable */
+#define IPATH_NOCABLE 0x4000
+ /* Supports port zero per packet receive interrupts via
+ * GPIO */
+#define IPATH_GPIO_INTR 0x8000
+ /* uses the coded 4byte TID, not 8 byte */
+#define IPATH_4BYTE_TID 0x10000
+ /* packet/word counters are 32 bit, else those 4 counters
+ * are 64bit */
+#define IPATH_32BITCOUNTERS 0x20000
+ /* can miss port0 rx interrupts */
+#define IPATH_POLL_RX_INTR 0x40000
+#define IPATH_DISABLED 0x80000 /* administratively disabled */
+
+/* portdata flag bit offsets */
+ /* waiting for a packet to arrive */
+#define IPATH_PORT_WAITING_RCV 2
+ /* waiting for a PIO buffer to be available */
+#define IPATH_PORT_WAITING_PIO 3
+
+/* free up any allocated data at closes */
+void ipath_free_data(struct ipath_portdata *dd);
+int ipath_waitfor_mdio_cmdready(struct ipath_devdata *);
+int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *);
+u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
+/* init PE-800-specific func */
+void ipath_init_pe800_funcs(struct ipath_devdata *);
+/* init HT-400-specific func */
+void ipath_init_ht400_funcs(struct ipath_devdata *);
+void ipath_get_guid(struct ipath_devdata *);
+u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
+
+/*
+ * number of words used for protocol header if not set by ipath_userinit()
+ */
+#define IPATH_DFLT_RCVHDRSIZE 9
+
+#define IPATH_MDIO_CMD_WRITE 1
+#define IPATH_MDIO_CMD_READ 2
+#define IPATH_MDIO_CLD_DIV 25 /* to get 2.5 Mhz mdio clock */
+#define IPATH_MDIO_CMDVALID 0x40000000 /* bit 30 */
+#define IPATH_MDIO_DATAVALID 0x80000000 /* bit 31 */
+#define IPATH_MDIO_CTRL_STD 0x0
+
+static inline u64 ipath_mdio_req(int cmd, int dev, int reg, int data)
+{
+ return (((u64) IPATH_MDIO_CLD_DIV) << 32) |
+ (cmd << 26) |
+ (dev << 21) |
+ (reg << 16) |
+ (data & 0xFFFF);
+}
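
As an illustration of the field layout ipath_mdio_req() builds (not part of the patch; the choice of device address 31 is only an assumption based on the "bank 31" comment just below):

/* Example only: encode an MDIO read of XGXS register 0x8 on device 31. */
u64 req = ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
			 IPATH_MDIO_CTRL_XGXS_REG_8, 0);
/*
 * Resulting layout, from the shifts above:
 *   bits 36..32  IPATH_MDIO_CLD_DIV (25, for a 2.5 MHz MDIO clock)
 *   bits 27..26  command (2 == IPATH_MDIO_CMD_READ)
 *   bits 25..21  device address (31)
 *   bits 20..16  register address (0x8)
 *   bits 15..0   data (0, unused for a read)
 */
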
+
+ /* signal and fifo status, in bank 31 */
+#define IPATH_MDIO_CTRL_XGXS_REG_8 0x8
+ /* controls loopback, redundancy */
+#define IPATH_MDIO_CTRL_8355_REG_1 0x10
+ /* premph, encdec, etc. */
+#define IPATH_MDIO_CTRL_8355_REG_2 0x11
+ /* Kchars, etc. */
+#define IPATH_MDIO_CTRL_8355_REG_6 0x15
+#define IPATH_MDIO_CTRL_8355_REG_9 0x18
+#define IPATH_MDIO_CTRL_8355_REG_10 0x1D
+
+int ipath_get_user_pages(unsigned long, size_t, struct page **);
+int ipath_get_user_pages_nocopy(unsigned long, struct page **);
+void ipath_release_user_pages(struct page **, size_t);
+void ipath_release_user_pages_on_close(struct page **, size_t);
+int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
+int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
+
+/* these are used for the registers that vary with port */
+void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
+ unsigned, u64);
+u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg,
+ unsigned);
+
+/*
+ * We could have a single register get/put routine, that takes a group type,
+ * but this is somewhat clearer and cleaner. It also gives us some error
+ * checking. 64 bit register reads should always work, but are inefficient
+ * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
+ * so we use kreg32 wherever possible. User register and counter register
+ * reads are always 32 bit reads, so only one form of those routines.
+ */
+
+/*
+ * At the moment, none of the s-registers are writable, so no
+ * ipath_write_sreg(), and none of the c-registers are writable, so no
+ * ipath_write_creg().
+ */
+
+/**
+ * ipath_read_ureg32 - read 32-bit virtualized per-port register
+ * @dd: device
+ * @regno: register number
+ * @port: port number
+ *
+ * Return the contents of a register that is virtualized to be per port.
+ * Returns 0 if the chip registers are not yet mapped (not distinguishable
+ * from valid contents at runtime; we may add a separate error variable at
+ * some point).
+ *
+ * This is normally not used by the kernel, but may be for debugging, and
+ * has a different implementation than user mode, which is why it's not in
+ * _common.h.
+ */
+static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
+ ipath_ureg regno, int port)
+{
+ if (!dd->ipath_kregbase)
+ return 0;
+
+ return readl(regno + (u64 __iomem *)
+ (dd->ipath_uregbase +
+ (char __iomem *)dd->ipath_kregbase +
+ dd->ipath_palign * port));
+}
+
+/**
+ * ipath_write_ureg - write virtualized per-port register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @port: port
+ *
+ * Write the contents of a register that is virtualized to be per port.
+ */
+static inline void ipath_write_ureg(const struct ipath_devdata *dd,
+ ipath_ureg regno, u64 value, int port)
+{
+ u64 __iomem *ubase = (u64 __iomem *)
+ (dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
+ dd->ipath_palign * port);
+ if (dd->ipath_kregbase)
+ writeq(value, &ubase[regno]);
+}
+
+static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
+ ipath_kreg regno)
+{
+ if (!dd->ipath_kregbase)
+ return -1;
+ return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
+}
+
+static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
+ ipath_kreg regno)
+{
+ if (!dd->ipath_kregbase)
+ return -1;
+
+ return readq(&dd->ipath_kregbase[regno]);
+}
+
+static inline void ipath_write_kreg(const struct ipath_devdata *dd,
+ ipath_kreg regno, u64 value)
+{
+ if (dd->ipath_kregbase)
+ writeq(value, &dd->ipath_kregbase[regno]);
+}
+
+static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
+ ipath_sreg regno)
+{
+ if (!dd->ipath_kregbase)
+ return 0;
+
+ return readq(regno + (u64 __iomem *)
+ (dd->ipath_cregbase +
+ (char __iomem *)dd->ipath_kregbase));
+}
+
+static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
+ ipath_sreg regno)
+{
+ if (!dd->ipath_kregbase)
+ return 0;
+ return readl(regno + (u64 __iomem *)
+ (dd->ipath_cregbase +
+ (char __iomem *)dd->ipath_kregbase));
+}
+
+/*
+ * sysfs interface.
+ */
+
+struct device_driver;
+
+extern const char ipath_core_version[];
+
+int ipath_driver_create_group(struct device_driver *);
+void ipath_driver_remove_group(struct device_driver *);
+
+int ipath_device_create_group(struct device *, struct ipath_devdata *);
+void ipath_device_remove_group(struct device *, struct ipath_devdata *);
+int ipath_expose_reset(struct device *);
+
+int ipath_init_ipathfs(void);
+void ipath_exit_ipathfs(void);
+int ipathfs_add_device(struct ipath_devdata *);
+int ipathfs_remove_device(struct ipath_devdata *);
+
+/*
+ * Flush write combining store buffers (if present) and perform a write
+ * barrier.
+ */
+#if defined(CONFIG_X86_64)
+#define ipath_flush_wc() asm volatile("sfence" ::: "memory")
+#else
+#define ipath_flush_wc() wmb()
+#endif
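
A minimal sketch of the pattern this macro exists for, mirroring the PIO copy code later in this patch (ipath_verbs_send()/copy_io()): everything ahead of the trigger word has to leave the write-combining buffers before the trigger word is written, and the trigger word itself has to be flushed too. The helper name is made up for illustration:

/* Sketch only: copy nwords dwords to a PIO buffer; the last dword triggers. */
static void example_pio_copy(u32 __iomem *piobuf, const u32 *src, u32 nwords)
{
	__iowrite32_copy(piobuf, src, nwords - 1);
	/* flush everything before the trigger word */
	ipath_flush_wc();
	__raw_writel(src[nwords - 1], piobuf + nwords - 1);
	/* be sure the trigger word is written */
	ipath_flush_wc();
}
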
+
+extern unsigned ipath_debug; /* debugging bit mask */
+
+const char *ipath_get_unit_name(int unit);
+
+extern struct mutex ipath_mutex;
+
+#define IPATH_DRV_NAME "ipath_core"
+#define IPATH_MAJOR 233
+#define IPATH_SMA_MINOR 128
+#define IPATH_DIAG_MINOR 129
+#define IPATH_NMINORS 130
+
+#define ipath_dev_err(dd,fmt,...) \
+ do { \
+ const struct ipath_devdata *__dd = (dd); \
+ if (__dd->pcidev) \
+ dev_err(&__dd->pcidev->dev, "%s: " fmt, \
+ ipath_get_unit_name(__dd->ipath_unit), \
+ ##__VA_ARGS__); \
+ else \
+ printk(KERN_ERR IPATH_DRV_NAME ": %s: " fmt, \
+ ipath_get_unit_name(__dd->ipath_unit), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#if _IPATH_DEBUGGING
+
+# define __IPATH_DBG_WHICH(which,fmt,...) \
+ do { \
+ if(unlikely(ipath_debug&(which))) \
+ printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
+ __func__,##__VA_ARGS__); \
+ } while(0)
+
+# define ipath_dbg(fmt,...) \
+ __IPATH_DBG_WHICH(__IPATH_DBG,fmt,##__VA_ARGS__)
+# define ipath_cdbg(which,fmt,...) \
+ __IPATH_DBG_WHICH(__IPATH_##which##DBG,fmt,##__VA_ARGS__)
+
+#else /* ! _IPATH_DEBUGGING */
+
+# define ipath_dbg(fmt,...)
+# define ipath_cdbg(which,fmt,...)
+
+#endif /* _IPATH_DEBUGGING */
+
+#endif /* _IPATH_KERNEL_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
new file mode 100644
index 0000000000000..aa33b0e9f2f63
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <asm/io.h>
+
+#include "ipath_verbs.h"
+
+/**
+ * ipath_alloc_lkey - allocate an lkey
+ * @rkt: lkey table in which to allocate the lkey
+ * @mr: memory region that this lkey protects
+ *
+ * Returns 1 if successful, otherwise returns 0.
+ */
+
+int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr)
+{
+ unsigned long flags;
+ u32 r;
+ u32 n;
+ int ret;
+
+ spin_lock_irqsave(&rkt->lock, flags);
+
+ /* Find the next available LKEY */
+ r = n = rkt->next;
+ for (;;) {
+ if (rkt->table[r] == NULL)
+ break;
+ r = (r + 1) & (rkt->max - 1);
+ if (r == n) {
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ _VERBS_INFO("LKEY table full\n");
+ ret = 0;
+ goto bail;
+ }
+ }
+ rkt->next = (r + 1) & (rkt->max - 1);
+ /*
+ * Make sure lkey is never zero, since zero is reserved to indicate
+ * an unrestricted LKEY.
+ */
+ rkt->gen++;
+ mr->lkey = (r << (32 - ib_ipath_lkey_table_size)) |
+ ((((1 << (24 - ib_ipath_lkey_table_size)) - 1) & rkt->gen)
+ << 8);
+ if (mr->lkey == 0) {
+ mr->lkey |= 1 << 8;
+ rkt->gen++;
+ }
+ rkt->table[r] = mr;
+ spin_unlock_irqrestore(&rkt->lock, flags);
+
+ ret = 1;
+
+bail:
+ return ret;
+}
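
A worked example of the lkey layout built above (illustrative only; ib_ipath_lkey_table_size is a module parameter defined elsewhere in this patch, so the value 12 below is an assumption):

/*
 * Example only, assuming ib_ipath_lkey_table_size == 12:
 *
 *   table index r = 5, generation rkt->gen = 0x1a3
 *   lkey = (5 << (32 - 12)) | ((0x1a3 & ((1 << (24 - 12)) - 1)) << 8)
 *        = 0x00500000 | 0x0001a300 = 0x0051a300
 *
 * ipath_free_lkey() recovers the index as lkey >> (32 - 12) == 5.  The
 * low 8 bits are always zero; if index and generation were both zero,
 * bit 8 is forced on so that lkey 0 stays reserved for the unrestricted
 * key.
 */
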
+
+/**
+ * ipath_free_lkey - free an lkey
+ * @rkt: table from which to free the lkey
+ * @lkey: lkey id to free
+ */
+void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey)
+{
+ unsigned long flags;
+ u32 r;
+
+ if (lkey == 0)
+ return;
+ r = lkey >> (32 - ib_ipath_lkey_table_size);
+ spin_lock_irqsave(&rkt->lock, flags);
+ rkt->table[r] = NULL;
+ spin_unlock_irqrestore(&rkt->lock, flags);
+}
+
+/**
+ * ipath_lkey_ok - check IB SGE for validity and initialize
+ * @rkt: table containing lkey to check SGE against
+ * @isge: outgoing internal SGE
+ * @sge: SGE to check
+ * @acc: access flags
+ *
+ * Return 1 if valid and successful, otherwise returns 0.
+ *
+ * Check the IB SGE for validity and initialize our internal version
+ * of it.
+ */
+int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
+ struct ib_sge *sge, int acc)
+{
+ struct ipath_mregion *mr;
+ size_t off;
+ int ret;
+
+ /*
+ * We use LKEY == zero to mean a physical kmalloc() address.
+ * This is a bit of a hack since we rely on dma_map_single()
+ * being reversible by calling bus_to_virt().
+ */
+ if (sge->lkey == 0) {
+ isge->mr = NULL;
+ isge->vaddr = bus_to_virt(sge->addr);
+ isge->length = sge->length;
+ isge->sge_length = sge->length;
+ ret = 1;
+ goto bail;
+ }
+ spin_lock(&rkt->lock);
+ mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
+ spin_unlock(&rkt->lock);
+ if (unlikely(mr == NULL || mr->lkey != sge->lkey)) {
+ ret = 0;
+ goto bail;
+ }
+
+ off = sge->addr - mr->user_base;
+ if (unlikely(sge->addr < mr->user_base ||
+ off + sge->length > mr->length ||
+ (mr->access_flags & acc) != acc)) {
+ ret = 0;
+ goto bail;
+ }
+
+ off += mr->offset;
+ isge->mr = mr;
+ isge->m = 0;
+ isge->n = 0;
+ while (off >= mr->map[isge->m]->segs[isge->n].length) {
+ off -= mr->map[isge->m]->segs[isge->n].length;
+ isge->n++;
+ if (isge->n >= IPATH_SEGSZ) {
+ isge->m++;
+ isge->n = 0;
+ }
+ }
+ isge->vaddr = mr->map[isge->m]->segs[isge->n].vaddr + off;
+ isge->length = mr->map[isge->m]->segs[isge->n].length - off;
+ isge->sge_length = sge->length;
+
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_rkey_ok - check the IB virtual address, length, and RKEY
+ * @dev: infiniband device
+ * @ss: SGE state
+ * @len: length of data
+ * @vaddr: virtual address to place data
+ * @rkey: rkey to check
+ * @acc: access flags
+ *
+ * Return 1 if successful, otherwise 0.
+ *
+ * The QP r_rq.lock should be held.
+ */
+int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
+ u32 len, u64 vaddr, u32 rkey, int acc)
+{
+ struct ipath_lkey_table *rkt = &dev->lk_table;
+ struct ipath_sge *sge = &ss->sge;
+ struct ipath_mregion *mr;
+ size_t off;
+ int ret;
+
+ spin_lock(&rkt->lock);
+ mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
+ spin_unlock(&rkt->lock);
+ if (unlikely(mr == NULL || mr->lkey != rkey)) {
+ ret = 0;
+ goto bail;
+ }
+
+ off = vaddr - mr->iova;
+ if (unlikely(vaddr < mr->iova || off + len > mr->length ||
+ (mr->access_flags & acc) == 0)) {
+ ret = 0;
+ goto bail;
+ }
+
+ off += mr->offset;
+ sge->mr = mr;
+ sge->m = 0;
+ sge->n = 0;
+ while (off >= mr->map[sge->m]->segs[sge->n].length) {
+ off -= mr->map[sge->m]->segs[sge->n].length;
+ sge->n++;
+ if (sge->n >= IPATH_SEGSZ) {
+ sge->m++;
+ sge->n = 0;
+ }
+ }
+ sge->vaddr = mr->map[sge->m]->segs[sge->n].vaddr + off;
+ sge->length = mr->map[sge->m]->segs[sge->n].length - off;
+ sge->sge_length = len;
+ ss->sg_list = NULL;
+ ss->num_sge = 1;
+
+ ret = 1;
+
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
new file mode 100644
index 0000000000000..2cabf63405721
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -0,0 +1,1515 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * These are the routines used by layered drivers, currently just the
+ * layered ethernet driver and verbs layer.
+ */
+
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <asm/byteorder.h>
+
+#include "ipath_kernel.h"
+#include "ips_common.h"
+#include "ipath_layer.h"
+
+/* Acquire before ipath_devs_lock. */
+static DEFINE_MUTEX(ipath_layer_mutex);
+
+u16 ipath_layer_rcv_opcode;
+static int (*layer_intr)(void *, u32);
+static int (*layer_rcv)(void *, void *, struct sk_buff *);
+static int (*layer_rcv_lid)(void *, void *);
+static int (*verbs_piobufavail)(void *);
+static void (*verbs_rcv)(void *, void *, void *, u32);
+int ipath_verbs_registered;
+
+static void *(*layer_add_one)(int, struct ipath_devdata *);
+static void (*layer_remove_one)(void *);
+static void *(*verbs_add_one)(int, struct ipath_devdata *);
+static void (*verbs_remove_one)(void *);
+static void (*verbs_timer_cb)(void *);
+
+int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
+{
+ int ret = -ENODEV;
+
+ if (dd->ipath_layer.l_arg && layer_intr)
+ ret = layer_intr(dd->ipath_layer.l_arg, arg);
+
+ return ret;
+}
+
+int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
+{
+ int ret;
+
+ mutex_lock(&ipath_layer_mutex);
+
+ ret = __ipath_layer_intr(dd, arg);
+
+ mutex_unlock(&ipath_layer_mutex);
+
+ return ret;
+}
+
+int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
+ struct sk_buff *skb)
+{
+ int ret = -ENODEV;
+
+ if (dd->ipath_layer.l_arg && layer_rcv)
+ ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);
+
+ return ret;
+}
+
+int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
+{
+ int ret = -ENODEV;
+
+ if (dd->ipath_layer.l_arg && layer_rcv_lid)
+ ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);
+
+ return ret;
+}
+
+int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
+{
+ int ret = -ENODEV;
+
+ if (dd->verbs_layer.l_arg && verbs_piobufavail)
+ ret = verbs_piobufavail(dd->verbs_layer.l_arg);
+
+ return ret;
+}
+
+int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
+ u32 tlen)
+{
+ int ret = -ENODEV;
+
+ if (dd->verbs_layer.l_arg && verbs_rcv) {
+ verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
+{
+ u32 lstate;
+ int ret;
+
+ switch (newstate) {
+ case IPATH_IB_LINKDOWN:
+ ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
+ INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case IPATH_IB_LINKDOWN_SLEEP:
+ ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
+ INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case IPATH_IB_LINKDOWN_DISABLE:
+ ipath_set_ib_lstate(dd,
+ INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
+ INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case IPATH_IB_LINKINIT:
+ if (dd->ipath_flags & IPATH_LINKINIT) {
+ ret = 0;
+ goto bail;
+ }
+ ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
+ INFINIPATH_IBCC_LINKCMD_SHIFT);
+ lstate = IPATH_LINKINIT;
+ break;
+
+ case IPATH_IB_LINKARM:
+ if (dd->ipath_flags & IPATH_LINKARMED) {
+ ret = 0;
+ goto bail;
+ }
+ if (!(dd->ipath_flags &
+ (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
+ INFINIPATH_IBCC_LINKCMD_SHIFT);
+ /*
+ * Since the port can transition to ACTIVE by receiving
+ * a non VL 15 packet, wait for either state.
+ */
+ lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
+ break;
+
+ case IPATH_IB_LINKACTIVE:
+ if (dd->ipath_flags & IPATH_LINKACTIVE) {
+ ret = 0;
+ goto bail;
+ }
+ if (!(dd->ipath_flags & IPATH_LINKARMED)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
+ INFINIPATH_IBCC_LINKCMD_SHIFT);
+ lstate = IPATH_LINKACTIVE;
+ break;
+
+ default:
+ ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
+ ret = -EINVAL;
+ goto bail;
+ }
+ ret = ipath_wait_linkstate(dd, lstate, 2000);
+
+bail:
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
+
+/**
+ * ipath_layer_set_mtu - set the MTU
+ * @dd: the infinipath device
+ * @arg: the new MTU
+ *
+ * we can handle "any" incoming size, the issue here is whether we
+ * need to restrict our outgoing size. For now, we don't do any
+ * sanity checking on this, and we don't deal with what happens to
+ * programs that are already running when the size changes.
+ * NOTE: changing the MTU will usually cause the IBC to go back to
+ * link initialize (IPATH_IBSTATE_INIT) state...
+ */
+int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
+{
+ u32 piosize;
+ int changed = 0;
+ int ret;
+
+ /*
+ * mtu is IB data payload max. It's the largest power of 2 less
+ * than piosize (or even larger, since it only really controls the
+ * largest we can receive; we can send the max of the mtu and
+ * piosize). We check that it's one of the valid IB sizes.
+ */
+ if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
+ arg != 4096) {
+ ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (dd->ipath_ibmtu == arg) {
+ ret = 0; /* same as current */
+ goto bail;
+ }
+
+ piosize = dd->ipath_ibmaxlen;
+ dd->ipath_ibmtu = arg;
+
+ if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
+ /* Only if it's not the initial value (or reset to it) */
+ if (piosize != dd->ipath_init_ibmaxlen) {
+ dd->ipath_ibmaxlen = piosize;
+ changed = 1;
+ }
+ } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
+ piosize = arg + IPATH_PIO_MAXIBHDR;
+ ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
+ "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
+ arg);
+ dd->ipath_ibmaxlen = piosize;
+ changed = 1;
+ }
+
+ if (changed) {
+ /*
+ * set the IBC maxpktlength to the size of our pio
+ * buffers in words
+ */
+ u64 ibc = dd->ipath_ibcctrl;
+ ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
+ INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
+
+ piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
+ dd->ipath_ibmaxlen = piosize;
+ piosize /= sizeof(u32); /* in words */
+ /*
+ * for ICRC, which we only send in diag test pkt mode, and
+ * we don't need to worry about that for mtu
+ */
+ piosize += 1;
+
+ ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
+ dd->ipath_ibcctrl = ibc;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ dd->ipath_ibcctrl);
+ dd->ipath_f_tidtemplate(dd);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
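
A minimal usage sketch (the caller is hypothetical, not part of the patch); per the kernel-doc above, a successful MTU change usually drops the IBC back to the link INIT state, so callers should expect that:

/* Sketch only: switch the IB MTU if it actually changed. */
static int example_change_mtu(struct ipath_devdata *dd, u16 new_mtu)
{
	if (ipath_layer_get_ibmtu(dd) == new_mtu)
		return 0;	/* nothing to do */
	/* returns -EINVAL unless new_mtu is 256/512/1024/2048/4096 */
	return ipath_layer_set_mtu(dd, new_mtu);
}
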
+
+int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
+{
+ ipath_stats.sps_lid[dd->ipath_unit] = arg;
+ dd->ipath_lid = arg;
+ dd->ipath_lmc = lmc;
+
+ mutex_lock(&ipath_layer_mutex);
+
+ if (dd->ipath_layer.l_arg && layer_intr)
+ layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
+
+ mutex_unlock(&ipath_layer_mutex);
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_set_sps_lid);
+
+int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
+{
+ /* XXX - need to inform anyone who cares this just happened. */
+ dd->ipath_guid = guid;
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
+
+__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
+{
+ return dd->ipath_guid;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
+
+u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
+{
+ return dd->ipath_nguid;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
+
+int ipath_layer_query_device(struct ipath_devdata *dd, u32 * vendor,
+ u32 * boardrev, u32 * majrev, u32 * minrev)
+{
+ *vendor = dd->ipath_vendorid;
+ *boardrev = dd->ipath_boardrev;
+ *majrev = dd->ipath_majrev;
+ *minrev = dd->ipath_minrev;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_query_device);
+
+u32 ipath_layer_get_flags(struct ipath_devdata *dd)
+{
+ return dd->ipath_flags;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
+
+struct device *ipath_layer_get_device(struct ipath_devdata *dd)
+{
+ return &dd->pcidev->dev;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_device);
+
+u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
+{
+ return dd->ipath_deviceid;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
+
+u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
+{
+ return dd->ipath_lastibcstat;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
+
+u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
+{
+ return dd->ipath_ibmtu;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
+
+void ipath_layer_add(struct ipath_devdata *dd)
+{
+ mutex_lock(&ipath_layer_mutex);
+
+ if (layer_add_one)
+ dd->ipath_layer.l_arg =
+ layer_add_one(dd->ipath_unit, dd);
+
+ if (verbs_add_one)
+ dd->verbs_layer.l_arg =
+ verbs_add_one(dd->ipath_unit, dd);
+
+ mutex_unlock(&ipath_layer_mutex);
+}
+
+void ipath_layer_del(struct ipath_devdata *dd)
+{
+ mutex_lock(&ipath_layer_mutex);
+
+ if (dd->ipath_layer.l_arg && layer_remove_one) {
+ layer_remove_one(dd->ipath_layer.l_arg);
+ dd->ipath_layer.l_arg = NULL;
+ }
+
+ if (dd->verbs_layer.l_arg && verbs_remove_one) {
+ verbs_remove_one(dd->verbs_layer.l_arg);
+ dd->verbs_layer.l_arg = NULL;
+ }
+
+ mutex_unlock(&ipath_layer_mutex);
+}
+
+int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
+ void (*l_remove)(void *),
+ int (*l_intr)(void *, u32),
+ int (*l_rcv)(void *, void *, struct sk_buff *),
+ u16 l_rcv_opcode,
+ int (*l_rcv_lid)(void *, void *))
+{
+ struct ipath_devdata *dd, *tmp;
+ unsigned long flags;
+
+ mutex_lock(&ipath_layer_mutex);
+
+ layer_add_one = l_add;
+ layer_remove_one = l_remove;
+ layer_intr = l_intr;
+ layer_rcv = l_rcv;
+ layer_rcv_lid = l_rcv_lid;
+ ipath_layer_rcv_opcode = l_rcv_opcode;
+
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+
+ list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
+ if (!(dd->ipath_flags & IPATH_INITTED))
+ continue;
+
+ if (dd->ipath_layer.l_arg)
+ continue;
+
+ if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
+ *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;
+
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+ dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+ mutex_unlock(&ipath_layer_mutex);
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_register);
+
+void ipath_layer_unregister(void)
+{
+ struct ipath_devdata *dd, *tmp;
+ unsigned long flags;
+
+ mutex_lock(&ipath_layer_mutex);
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+
+ list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
+ if (dd->ipath_layer.l_arg && layer_remove_one) {
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+ layer_remove_one(dd->ipath_layer.l_arg);
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+ dd->ipath_layer.l_arg = NULL;
+ }
+ }
+
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+
+ layer_add_one = NULL;
+ layer_remove_one = NULL;
+ layer_intr = NULL;
+ layer_rcv = NULL;
+ layer_rcv_lid = NULL;
+
+ mutex_unlock(&ipath_layer_mutex);
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_unregister);
+
+static void __ipath_verbs_timer(unsigned long arg)
+{
+ struct ipath_devdata *dd = (struct ipath_devdata *) arg;
+
+ /*
+ * If port 0 receive packet interrupts are not available, or
+ * can be missed, poll the receive queue
+ */
+ if (dd->ipath_flags & IPATH_POLL_RX_INTR)
+ ipath_kreceive(dd);
+
+ /* Handle verbs layer timeouts. */
+ if (dd->verbs_layer.l_arg && verbs_timer_cb)
+ verbs_timer_cb(dd->verbs_layer.l_arg);
+
+ mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
+}
+
+/**
+ * ipath_verbs_register - verbs layer registration
+ * @l_add: callback for when a device is added
+ * @l_remove: callback for when a device is removed
+ * @l_piobufavail: callback for when PIO buffers become available
+ * @l_rcv: callback for receiving a packet
+ * @l_timer_cb: timer callback
+ */
+int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
+ void (*l_remove)(void *arg),
+ int (*l_piobufavail) (void *arg),
+ void (*l_rcv) (void *arg, void *rhdr,
+ void *data, u32 tlen),
+ void (*l_timer_cb) (void *arg))
+{
+ struct ipath_devdata *dd, *tmp;
+ unsigned long flags;
+
+ mutex_lock(&ipath_layer_mutex);
+
+ verbs_add_one = l_add;
+ verbs_remove_one = l_remove;
+ verbs_piobufavail = l_piobufavail;
+ verbs_rcv = l_rcv;
+ verbs_timer_cb = l_timer_cb;
+
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+
+ list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
+ if (!(dd->ipath_flags & IPATH_INITTED))
+ continue;
+
+ if (dd->verbs_layer.l_arg)
+ continue;
+
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+ dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+ mutex_unlock(&ipath_layer_mutex);
+
+ ipath_verbs_registered = 1;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_verbs_register);
+
+void ipath_verbs_unregister(void)
+{
+ struct ipath_devdata *dd, *tmp;
+ unsigned long flags;
+
+ mutex_lock(&ipath_layer_mutex);
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+
+ list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
+ *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
+
+ if (dd->verbs_layer.l_arg && verbs_remove_one) {
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+ verbs_remove_one(dd->verbs_layer.l_arg);
+ spin_lock_irqsave(&ipath_devs_lock, flags);
+ dd->verbs_layer.l_arg = NULL;
+ }
+ }
+
+ spin_unlock_irqrestore(&ipath_devs_lock, flags);
+
+ verbs_add_one = NULL;
+ verbs_remove_one = NULL;
+ verbs_piobufavail = NULL;
+ verbs_rcv = NULL;
+ verbs_timer_cb = NULL;
+
+ mutex_unlock(&ipath_layer_mutex);
+}
+
+EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
+
+int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
+{
+ int ret;
+ u32 intval = 0;
+
+ mutex_lock(&ipath_layer_mutex);
+
+ if (!dd->ipath_layer.l_arg) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);
+
+ if (ret < 0)
+ goto bail;
+
+ *pktmax = dd->ipath_ibmaxlen;
+
+ if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
+ intval |= IPATH_LAYER_INT_IF_UP;
+ if (ipath_stats.sps_lid[dd->ipath_unit])
+ intval |= IPATH_LAYER_INT_LID;
+ if (ipath_stats.sps_mlid[dd->ipath_unit])
+ intval |= IPATH_LAYER_INT_BCAST;
+ /*
+ * do this on open, in case the low level is already up and
+ * just the layered driver was reloaded, etc.
+ */
+ if (intval)
+ layer_intr(dd->ipath_layer.l_arg, intval);
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipath_layer_mutex);
+
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_open);
+
+u16 ipath_layer_get_lid(struct ipath_devdata *dd)
+{
+ return dd->ipath_lid;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
+
+/**
+ * ipath_layer_get_mac - get the MAC address
+ * @dd: the infinipath device
+ * @mac: the MAC is put here
+ *
+ * This takes the EUI-64 OUI octets (top 3), skips the next 2
+ * (which should both be zero or 0xff), and uses the low 3 octets.
+ * The returned MAC is in network order.
+ * mac must point to at least 6 bytes of buffer.
+ * We assume that by the time the LID is set, the GUID is as valid
+ * as it's ever going to be, rather than adding yet another status bit.
+ */
+
+int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
+{
+ u8 *guid;
+
+ guid = (u8 *) &dd->ipath_guid;
+
+ mac[0] = guid[0];
+ mac[1] = guid[1];
+ mac[2] = guid[2];
+ mac[3] = guid[5];
+ mac[4] = guid[6];
+ mac[5] = guid[7];
+ if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
+ ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
+ "%x %x\n", guid[3], guid[4]);
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
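
A worked example of the byte mapping above (the GUID value is made up for illustration):

/*
 * Example only: a GUID of 00:11:75:00:00:aa:bb:cc (network order, with
 * the two filler octets at guid[3] and guid[4]) yields the MAC address
 * 00:11:75:aa:bb:cc, i.e. mac[] = guid[0], guid[1], guid[2], guid[5],
 * guid[6], guid[7].
 */
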
+
+u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
+{
+ return dd->ipath_mlid;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
+
+u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
+{
+ return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
+
+static void update_sge(struct ipath_sge_state *ss, u32 length)
+{
+ struct ipath_sge *sge = &ss->sge;
+
+ sge->vaddr += length;
+ sge->length -= length;
+ sge->sge_length -= length;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr != NULL) {
+ if (++sge->n >= IPATH_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ return;
+ sge->n = 0;
+ }
+ sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+}
+
+#ifdef __LITTLE_ENDIAN
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+ return data >> shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+ return data << shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+ data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
+ data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+ return data;
+}
+#else
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+ return data << shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+ return data >> shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+ data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
+ data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+ return data;
+}
+#endif
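
A worked example of the little-endian variants above (values are illustrative): clear_upper_bytes() keeps the first n bytes of the source word, i.e. its low-order bytes on little-endian, and places them off bytes up in the output dword.

/*
 * Example only (little-endian build, u32 arithmetic):
 *
 *   clear_upper_bytes(0xaabbccdd, 2, 0) == 0x0000ccdd
 *       (keep the first two bytes in memory, dd and cc)
 *   clear_upper_bytes(0xaabbccdd, 2, 1) == 0x00ccdd00
 *       (same two bytes, shifted up one byte position for 'off')
 */
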
+
+static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
+ u32 length)
+{
+ u32 extra = 0;
+ u32 data = 0;
+ u32 last;
+
+ while (1) {
+ u32 len = ss->sge.length;
+ u32 off;
+
+ BUG_ON(len == 0);
+ if (len > length)
+ len = length;
+ if (len > ss->sge.sge_length)
+ len = ss->sge.sge_length;
+ /* If the source address is not aligned, try to align it. */
+ off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
+ if (off) {
+ u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
+ ~(sizeof(u32) - 1));
+ u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
+ u32 y;
+
+ y = sizeof(u32) - off;
+ if (len > y)
+ len = y;
+ if (len + extra >= sizeof(u32)) {
+ data |= set_upper_bits(v, extra *
+ BITS_PER_BYTE);
+ len = sizeof(u32) - extra;
+ if (len == length) {
+ last = data;
+ break;
+ }
+ __raw_writel(data, piobuf);
+ piobuf++;
+ extra = 0;
+ data = 0;
+ } else {
+ /* Clear unused upper bytes */
+ data |= clear_upper_bytes(v, len, extra);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ extra += len;
+ }
+ } else if (extra) {
+ /* Source address is aligned. */
+ u32 *addr = (u32 *) ss->sge.vaddr;
+ int shift = extra * BITS_PER_BYTE;
+ int ushift = 32 - shift;
+ u32 l = len;
+
+ while (l >= sizeof(u32)) {
+ u32 v = *addr;
+
+ data |= set_upper_bits(v, shift);
+ __raw_writel(data, piobuf);
+ data = get_upper_bits(v, ushift);
+ piobuf++;
+ addr++;
+ l -= sizeof(u32);
+ }
+ /*
+ * We still have 'l' bytes left over from this chunk.
+ */
+ if (l) {
+ u32 v = *addr;
+
+ if (l + extra >= sizeof(u32)) {
+ data |= set_upper_bits(v, shift);
+ len -= l + extra - sizeof(u32);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ __raw_writel(data, piobuf);
+ piobuf++;
+ extra = 0;
+ data = 0;
+ } else {
+ /* Clear unused upper bytes */
+ data |= clear_upper_bytes(v, l,
+ extra);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ extra += l;
+ }
+ } else if (len == length) {
+ last = data;
+ break;
+ }
+ } else if (len == length) {
+ u32 w;
+
+ /*
+ * Need to round up for the last dword in the
+ * packet.
+ */
+ w = (len + 3) >> 2;
+ __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
+ piobuf += w - 1;
+ last = ((u32 *) ss->sge.vaddr)[w - 1];
+ break;
+ } else {
+ u32 w = len >> 2;
+
+ __iowrite32_copy(piobuf, ss->sge.vaddr, w);
+ piobuf += w;
+
+ extra = len & (sizeof(u32) - 1);
+ if (extra) {
+ u32 v = ((u32 *) ss->sge.vaddr)[w];
+
+ /* Clear unused upper bytes */
+ data = clear_upper_bytes(v, extra, 0);
+ }
+ }
+ update_sge(ss, len);
+ length -= len;
+ }
+ /* must flush everything before the trigger word */
+ ipath_flush_wc();
+ __raw_writel(last, piobuf);
+ /* be sure trigger word is written */
+ ipath_flush_wc();
+ update_sge(ss, length);
+}
+
+/**
+ * ipath_verbs_send - send a packet from the verbs layer
+ * @dd: the infinipath device
+ * @hdrwords: the number of words in the header
+ * @hdr: the packet header
+ * @len: the length of the packet in bytes
+ * @ss: the SGE to send
+ *
+ * This is like ipath_sma_send_pkt() in that we need to be able to send
+ * packets after the chip is initialized (MADs) but also like
+ * ipath_layer_send_hdr() since it's used by the verbs layer.
+ */
+int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
+ u32 *hdr, u32 len, struct ipath_sge_state *ss)
+{
+ u32 __iomem *piobuf;
+ u32 plen;
+ int ret;
+
+ /* +1 is for the qword padding of pbc */
+ plen = hdrwords + ((len + 3) >> 2) + 1;
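+ /*
+ * For example (illustrative numbers only): hdrwords = 10 and a
+ * 57-byte payload give plen = 10 + 15 + 1 = 26 dwords.
+ */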
+ if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
+ ipath_dbg("packet len 0x%x too long, failing\n", plen);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* Get a PIO buffer to use. */
+ piobuf = ipath_getpiobuf(dd, NULL);
+ if (unlikely(piobuf == NULL)) {
+ ret = -EBUSY;
+ goto bail;
+ }
+
+ /*
+ * Write len to control qword, no flags.
+ * We have to flush after the PBC for correctness on some CPUs,
+ * or the WC buffer can be written out of order.
+ */
+ writeq(plen, piobuf);
+ ipath_flush_wc();
+ piobuf += 2;
+ if (len == 0) {
+ /*
+ * If there is just the header portion, we must flush before
+ * writing the last word of the header for correctness, and
+ * again after the last header word (the trigger word).
+ */
+ __iowrite32_copy(piobuf, hdr, hdrwords - 1);
+ ipath_flush_wc();
+ __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
+ ipath_flush_wc();
+ ret = 0;
+ goto bail;
+ }
+
+ __iowrite32_copy(piobuf, hdr, hdrwords);
+ piobuf += hdrwords;
+
+ /* The common case is aligned and contained in one segment. */
+ if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
+ !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
+ u32 w;
+
+ /* Need to round up for the last dword in the packet. */
+ w = (len + 3) >> 2;
+ __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
+ /* must flush everything before writing the trigger word */
+ ipath_flush_wc();
+ __raw_writel(((u32 *) ss->sge.vaddr)[w - 1],
+ piobuf + w - 1);
+ /* be sure trigger word is written */
+ ipath_flush_wc();
+ update_sge(ss, len);
+ ret = 0;
+ goto bail;
+ }
+ copy_io(piobuf, ss, len);
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(ipath_verbs_send);
+
+int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
+ u64 *rwords, u64 *spkts, u64 *rpkts,
+ u64 *xmit_wait)
+{
+ int ret;
+
+ if (!(dd->ipath_flags & IPATH_INITTED)) {
+ /* no hardware, freeze, etc. */
+ ipath_dbg("unit %u not usable\n", dd->ipath_unit);
+ ret = -EINVAL;
+ goto bail;
+ }
+ *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+ *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+ *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
+ *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+ *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
+
+/**
+ * ipath_layer_get_counters - get various chip counters
+ * @dd: the infinipath device
+ * @cntrs: counters are placed here
+ *
+ * Return the counters needed by recv_pma_get_portcounters().
+ */
+int ipath_layer_get_counters(struct ipath_devdata *dd,
+ struct ipath_layer_counters *cntrs)
+{
+ int ret;
+
+ if (!(dd->ipath_flags & IPATH_INITTED)) {
+ /* no hardware, freeze, etc. */
+ ipath_dbg("unit %u not usable\n", dd->ipath_unit);
+ ret = -EINVAL;
+ goto bail;
+ }
+ cntrs->symbol_error_counter =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
+ cntrs->link_error_recovery_counter =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
+ cntrs->link_downed_counter =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
+ cntrs->port_rcv_errors =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
+ cntrs->port_rcv_remphys_errors =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
+ cntrs->port_xmit_discards =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
+ cntrs->port_xmit_data =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+ cntrs->port_rcv_data =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+ cntrs->port_xmit_packets =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
+ cntrs->port_rcv_packets =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
+
+int ipath_layer_want_buffer(struct ipath_devdata *dd)
+{
+ set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ dd->ipath_sendctrl);
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
+
+int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
+{
+ int ret = 0;
+ u32 __iomem *piobuf;
+ u32 plen, *uhdr;
+ size_t count;
+ __be16 vlsllnh;
+
+ if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
+ ipath_dbg("send while not open\n");
+ ret = -EINVAL;
+ } else
+ if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
+ dd->ipath_lid == 0) {
+ /*
+ * The LID check covers the case where the SMA hasn't yet configured the LID.
+ */
+ ret = -ENETDOWN;
+ ipath_cdbg(VERBOSE, "send while not ready, "
+ "mylid=%u, flags=0x%x\n",
+ dd->ipath_lid, dd->ipath_flags);
+ }
+
+ vlsllnh = *((__be16 *) hdr);
+ if (vlsllnh != htons(IPS_LRH_BTH)) {
+ ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
+ "not sending\n", be16_to_cpu(vlsllnh),
+ IPS_LRH_BTH);
+ ret = -EINVAL;
+ }
+ if (ret)
+ goto done;
+
+ /* Get a PIO buffer to use. */
+ piobuf = ipath_getpiobuf(dd, NULL);
+ if (piobuf == NULL) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ plen = (sizeof(*hdr) >> 2); /* actual length */
+ ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);
+
+ writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
+ ipath_flush_wc();
+ piobuf += 2;
+ uhdr = (u32 *)hdr;
+ count = plen-1; /* amount we can copy before trigger word */
+ __iowrite32_copy(piobuf, uhdr, count);
+ ipath_flush_wc();
+ __raw_writel(uhdr[count], piobuf + count);
+ ipath_flush_wc(); /* ensure it's sent, now */
+
+ ipath_stats.sps_ether_spkts++; /* ether packet sent */
+
+done:
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
+
+int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
+{
+ set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ dd->ipath_sendctrl);
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
+
+int ipath_layer_enable_timer(struct ipath_devdata *dd)
+{
+ /*
+ * The HT-400 has a design flaw where the chip's and the kernel's ideas
+ * of the tail register don't always agree, and therefore we won't
+ * get an interrupt on the next packet received.
+ * If the board supports per packet receive interrupts, use it.
+ * Otherwise, the timer function periodically checks for packets
+ * to cover this case.
+ * Either way, the timer is needed for verbs layer related
+ * processing.
+ */
+ if (dd->ipath_flags & IPATH_GPIO_INTR) {
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
+ 0x2074076542310ULL);
+ /* Enable GPIO bit 2 interrupt */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+ (u64) (1 << 2));
+ }
+
+ init_timer(&dd->verbs_layer.l_timer);
+ dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
+ dd->verbs_layer.l_timer.data = (unsigned long)dd;
+ dd->verbs_layer.l_timer.expires = jiffies + 1;
+ add_timer(&dd->verbs_layer.l_timer);
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
+
+int ipath_layer_disable_timer(struct ipath_devdata *dd)
+{
+ /* Disable GPIO bit 2 interrupt */
+ if (dd->ipath_flags & IPATH_GPIO_INTR)
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
+
+ del_timer_sync(&dd->verbs_layer.l_timer);
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
+
+/**
+ * ipath_layer_set_verbs_flags - set the verbs layer flags
+ * @dd: the infinipath device
+ * @flags: the flags to set
+ */
+int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
+{
+ struct ipath_devdata *ss;
+ unsigned long lflags;
+
+ spin_lock_irqsave(&ipath_devs_lock, lflags);
+
+ list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
+ if (!(ss->ipath_flags & IPATH_INITTED))
+ continue;
+ if ((flags & IPATH_VERBS_KERNEL_SMA) &&
+ !(*ss->ipath_statusp & IPATH_STATUS_SMA))
+ *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
+ else
+ *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
+ }
+
+ spin_unlock_irqrestore(&ipath_devs_lock, lflags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
+
+/**
+ * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
+ * @dd: the infinipath device
+ */
+unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
+{
+ return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
+
+/**
+ * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
+ * @dd: the infinipath device
+ * @index: the PKEY index
+ */
+unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
+{
+ unsigned ret;
+
+ if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
+ ret = 0;
+ else
+ ret = dd->ipath_pd[0]->port_pkeys[index];
+
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
+
+/**
+ * ipath_layer_get_pkeys - return the PKEY table for port 0
+ * @dd: the infinipath device
+ * @pkeys: the pkey table is placed here
+ */
+int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
+{
+ struct ipath_portdata *pd = dd->ipath_pd[0];
+
+ memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
+
+/**
+ * rm_pkey - decrement the reference count for the given PKEY
+ * @dd: the infinipath device
+ * @key: the PKEY index
+ *
+ * Return true if this was the last reference and the hardware table entry
+ * needs to be changed.
+ */
+static int rm_pkey(struct ipath_devdata *dd, u16 key)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
+ if (dd->ipath_pkeys[i] != key)
+ continue;
+ if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
+ dd->ipath_pkeys[i] = 0;
+ ret = 1;
+ goto bail;
+ }
+ break;
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * add_pkey - add the given PKEY to the hardware table
+ * @dd: the infinipath device
+ * @key: the PKEY
+ *
+ * Return an error code if unable to add the entry, zero if no change,
+ * or 1 if the hardware PKEY register needs to be updated.
+ */
+static int add_pkey(struct ipath_devdata *dd, u16 key)
+{
+ int i;
+ u16 lkey = key & 0x7FFF;
+ int any = 0;
+ int ret;
+
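+ /*
+ * Informational note: bit 15 of a PKEY is the full/limited
+ * membership bit, so lkey is the 15-bit base key used for the
+ * comparisons below.  A base key of 0x7FFF is the default
+ * partition key and is treated as "no change" rather than added.
+ */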
+ if (lkey == 0x7FFF) {
+ ret = 0;
+ goto bail;
+ }
+
+ /* Look for an empty slot or a matching PKEY. */
+ for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
+ if (!dd->ipath_pkeys[i]) {
+ any++;
+ continue;
+ }
+ /* If it matches exactly, try to increment the ref count */
+ if (dd->ipath_pkeys[i] == key) {
+ if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
+ ret = 0;
+ goto bail;
+ }
+ /* Lost the race. Look for an empty slot below. */
+ atomic_dec(&dd->ipath_pkeyrefs[i]);
+ any++;
+ }
+ /*
+ * It makes no sense to have both the limited and unlimited
+ * PKEY set at the same time since the unlimited one will
+ * disable the limited one.
+ */
+ if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (!any) {
+ ret = -EBUSY;
+ goto bail;
+ }
+ for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
+ if (!dd->ipath_pkeys[i] &&
+ atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
+ /* for ipathstats, etc. */
+ ipath_stats.sps_pkeys[i] = lkey;
+ dd->ipath_pkeys[i] = key;
+ ret = 1;
+ goto bail;
+ }
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_layer_set_pkeys - set the PKEY table for port 0
+ * @dd: the infinipath device
+ * @pkeys: the PKEY table
+ */
+int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
+{
+ struct ipath_portdata *pd;
+ int i;
+ int changed = 0;
+
+ pd = dd->ipath_pd[0];
+
+ for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
+ u16 key = pkeys[i];
+ u16 okey = pd->port_pkeys[i];
+
+ if (key == okey)
+ continue;
+ /*
+ * The value of this PKEY table entry is changing.
+ * Remove the old entry in the hardware's array of PKEYs.
+ */
+ if (okey & 0x7FFF)
+ changed |= rm_pkey(dd, okey);
+ if (key & 0x7FFF) {
+ int ret = add_pkey(dd, key);
+
+ if (ret < 0)
+ key = 0;
+ else
+ changed |= ret;
+ }
+ pd->port_pkeys[i] = key;
+ }
+ if (changed) {
+ u64 pkey;
+
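+ /*
+ * Pack the four 16-bit PKEYs into the single 64-bit partition
+ * key register, entry 0 in the least significant bits.
+ */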
+ pkey = (u64) dd->ipath_pkeys[0] |
+ ((u64) dd->ipath_pkeys[1] << 16) |
+ ((u64) dd->ipath_pkeys[2] << 32) |
+ ((u64) dd->ipath_pkeys[3] << 48);
+ ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
+ (unsigned long long) pkey);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
+ pkey);
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
+
+/**
+ * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
+ * @dd: the infinipath device
+ *
+ * Returns zero if the default is POLL, 1 if the default is SLEEP.
+ */
+int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
+{
+ return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
+
+/**
+ * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
+ * @dd: the infinipath device
+ * @sleep: the new state
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
+ int sleep)
+{
+ if (sleep)
+ dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
+ else
+ dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ dd->ipath_ibcctrl);
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
+
+int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
+{
+ return (dd->ipath_ibcctrl >>
+ INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
+ INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
+
+/**
+ * ipath_layer_set_phyerrthreshold - set the physical error threshold
+ * @dd: the infinipath device
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
+{
+ unsigned v;
+
+ v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
+ INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
+ if (v != n) {
+ dd->ipath_ibcctrl &=
+ ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
+ INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
+ dd->ipath_ibcctrl |=
+ (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ dd->ipath_ibcctrl);
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
+
+int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
+{
+ return (dd->ipath_ibcctrl >>
+ INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
+ INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
+
+/**
+ * ipath_layer_set_overrunthreshold - set the overrun threshold
+ * @dd: the infinipath device
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
+{
+ unsigned v;
+
+ v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
+ INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
+ if (v != n) {
+ dd->ipath_ibcctrl &=
+ ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
+ INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
+ dd->ipath_ibcctrl |=
+ (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ dd->ipath_ibcctrl);
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
+
+int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
+ size_t namelen)
+{
+ return dd->ipath_f_get_boardname(dd, name, namelen);
+}
+EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
+
+u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
+{
+ return dd->ipath_rcvhdrentsize;
+}
+EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.h b/drivers/infiniband/hw/ipath/ipath_layer.h
new file mode 100644
index 0000000000000..6fefd15bd2da6
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_layer.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _IPATH_LAYER_H
+#define _IPATH_LAYER_H
+
+/*
+ * This header file is for symbols shared between the infinipath driver
+ * and drivers layered upon it (such as ipath).
+ */
+
+struct sk_buff;
+struct ipath_sge_state;
+struct ipath_devdata;
+struct ether_header;
+
+struct ipath_layer_counters {
+ u64 symbol_error_counter;
+ u64 link_error_recovery_counter;
+ u64 link_downed_counter;
+ u64 port_rcv_errors;
+ u64 port_rcv_remphys_errors;
+ u64 port_xmit_discards;
+ u64 port_xmit_data;
+ u64 port_rcv_data;
+ u64 port_xmit_packets;
+ u64 port_rcv_packets;
+};
+
+/*
+ * A segment is a linear region of low physical memory.
+ * XXX Maybe we should use phys addr here and kmap()/kunmap().
+ * Used by the verbs layer.
+ */
+struct ipath_seg {
+ void *vaddr;
+ size_t length;
+};
+
+/* The number of ipath_segs that fit in a page. */
+#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg))
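+/*
+ * For example, on a typical 64-bit build with 4 KiB pages and a
+ * 16-byte struct ipath_seg (8-byte pointer plus 8-byte size_t),
+ * IPATH_SEGSZ is 256.
+ */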
+
+struct ipath_segarray {
+ struct ipath_seg segs[IPATH_SEGSZ];
+};
+
+struct ipath_mregion {
+ u64 user_base; /* User's address for this region */
+ u64 iova; /* IB start address of this region */
+ size_t length;
+ u32 lkey;
+ u32 offset; /* offset (bytes) to start of region */
+ int access_flags;
+ u32 max_segs; /* number of ipath_segs in all the arrays */
+ u32 mapsz; /* size of the map array */
+ struct ipath_segarray *map[0]; /* the segments */
+};
+
+/*
+ * These keep track of the copy progress within a memory region.
+ * Used by the verbs layer.
+ */
+struct ipath_sge {
+ struct ipath_mregion *mr;
+ void *vaddr; /* current pointer into the segment */
+ u32 sge_length; /* length of the SGE */
+ u32 length; /* remaining length of the segment */
+ u16 m; /* current index: mr->map[m] */
+ u16 n; /* current index: mr->map[m]->segs[n] */
+};
+
+struct ipath_sge_state {
+ struct ipath_sge *sg_list; /* next SGE to be used if any */
+ struct ipath_sge sge; /* progress state for the current SGE */
+ u8 num_sge;
+};
+
+int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
+ void (*l_remove)(void *),
+ int (*l_intr)(void *, u32),
+ int (*l_rcv)(void *, void *,
+ struct sk_buff *),
+ u16 rcv_opcode,
+ int (*l_rcv_lid)(void *, void *));
+int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
+ void (*l_remove)(void *arg),
+ int (*l_piobufavail)(void *arg),
+ void (*l_rcv)(void *arg, void *rhdr,
+ void *data, u32 tlen),
+ void (*l_timer_cb)(void *arg));
+void ipath_layer_unregister(void);
+void ipath_verbs_unregister(void);
+int ipath_layer_open(struct ipath_devdata *, u32 * pktmax);
+u16 ipath_layer_get_lid(struct ipath_devdata *dd);
+int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *);
+u16 ipath_layer_get_bcast(struct ipath_devdata *dd);
+u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd);
+int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 state);
+int ipath_layer_set_mtu(struct ipath_devdata *, u16);
+int ipath_set_sps_lid(struct ipath_devdata *, u32, u8);
+int ipath_layer_send_hdr(struct ipath_devdata *dd,
+ struct ether_header *hdr);
+int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
+ u32 * hdr, u32 len, struct ipath_sge_state *ss);
+int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd);
+int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
+ size_t namelen);
+int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
+ u64 *rwords, u64 *spkts, u64 *rpkts,
+ u64 *xmit_wait);
+int ipath_layer_get_counters(struct ipath_devdata *dd,
+ struct ipath_layer_counters *cntrs);
+int ipath_layer_want_buffer(struct ipath_devdata *dd);
+int ipath_layer_set_guid(struct ipath_devdata *, __be64 guid);
+__be64 ipath_layer_get_guid(struct ipath_devdata *);
+u32 ipath_layer_get_nguid(struct ipath_devdata *);
+int ipath_layer_query_device(struct ipath_devdata *, u32 * vendor,
+ u32 * boardrev, u32 * majrev, u32 * minrev);
+u32 ipath_layer_get_flags(struct ipath_devdata *dd);
+struct device *ipath_layer_get_device(struct ipath_devdata *dd);
+u16 ipath_layer_get_deviceid(struct ipath_devdata *dd);
+u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd);
+u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd);
+int ipath_layer_enable_timer(struct ipath_devdata *dd);
+int ipath_layer_disable_timer(struct ipath_devdata *dd);
+int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags);
+unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd);
+unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index);
+int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 *pkeys);
+int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 *pkeys);
+int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd);
+int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
+ int sleep);
+int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd);
+int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n);
+int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd);
+int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n);
+u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd);
+
+/* ipath_ether interrupt values */
+#define IPATH_LAYER_INT_IF_UP 0x2
+#define IPATH_LAYER_INT_IF_DOWN 0x4
+#define IPATH_LAYER_INT_LID 0x8
+#define IPATH_LAYER_INT_SEND_CONTINUE 0x10
+#define IPATH_LAYER_INT_BCAST 0x40
+
+/* _verbs_layer.l_flags */
+#define IPATH_VERBS_KERNEL_SMA 0x1
+
+extern unsigned ipath_debug; /* debugging bit mask */
+
+#endif /* _IPATH_LAYER_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
new file mode 100644
index 0000000000000..f7f8391fe43fc
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -0,0 +1,1352 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_smi.h>
+
+#include "ipath_kernel.h"
+#include "ipath_verbs.h"
+#include "ips_common.h"
+
+#define IB_SMP_UNSUP_VERSION __constant_htons(0x0004)
+#define IB_SMP_UNSUP_METHOD __constant_htons(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR __constant_htons(0x000C)
+#define IB_SMP_INVALID_FIELD __constant_htons(0x001C)
+
+static int reply(struct ib_smp *smp)
+{
+ /*
+ * The verbs framework will handle the directed/LID route
+ * packet changes.
+ */
+ smp->method = IB_MGMT_METHOD_GET_RESP;
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ smp->status |= IB_SMP_DIRECTION;
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+static int recv_subn_get_nodedescription(struct ib_smp *smp,
+ struct ib_device *ibdev)
+{
+ if (smp->attr_mod)
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ strncpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+
+ return reply(smp);
+}
+
+struct nodeinfo {
+ u8 base_version;
+ u8 class_version;
+ u8 node_type;
+ u8 num_ports;
+ __be64 sys_guid;
+ __be64 node_guid;
+ __be64 port_guid;
+ __be16 partition_cap;
+ __be16 device_id;
+ __be32 revision;
+ u8 local_port_num;
+ u8 vendor_id[3];
+} __attribute__ ((packed));
+
+static int recv_subn_get_nodeinfo(struct ib_smp *smp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
+ struct ipath_devdata *dd = to_idev(ibdev)->dd;
+ u32 vendor, boardid, majrev, minrev;
+
+ if (smp->attr_mod)
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ nip->base_version = 1;
+ nip->class_version = 1;
+ nip->node_type = 1; /* channel adapter */
+ /*
+ * XXX The num_ports value will need a layer function to get
+ * the value if we ever have more than one IB port on a chip.
+ * We will also need to get the GUID for the port.
+ */
+ nip->num_ports = ibdev->phys_port_cnt;
+ /* This is already in network order */
+ nip->sys_guid = to_idev(ibdev)->sys_image_guid;
+ nip->node_guid = ipath_layer_get_guid(dd);
+ nip->port_guid = nip->sys_guid;
+ nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd));
+ nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd));
+ ipath_layer_query_device(dd, &vendor, &boardid, &majrev, &minrev);
+ nip->revision = cpu_to_be32((majrev << 16) | minrev);
+ nip->local_port_num = port;
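+ /*
+ * vendor_id holds the 24-bit IEEE OUI, most significant byte
+ * first; only the low 16 bits of 'vendor' are used here, so the
+ * top byte is written as zero.
+ */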
+ nip->vendor_id[0] = 0;
+ nip->vendor_id[1] = vendor >> 8;
+ nip->vendor_id[2] = vendor;
+
+ return reply(smp);
+}
+
+static int recv_subn_get_guidinfo(struct ib_smp *smp,
+ struct ib_device *ibdev)
+{
+ u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
+ __be64 *p = (__be64 *) smp->data;
+
+ /* 32 blocks of 8 64-bit GUIDs per block */
+
+ memset(smp->data, 0, sizeof(smp->data));
+
+ /*
+ * We only support one GUID for now. If this changes, the
+ * portinfo.guid_cap field needs to be updated too.
+ */
+ if (startgx == 0)
+ /* The first is a copy of the read-only HW GUID. */
+ *p = ipath_layer_get_guid(to_idev(ibdev)->dd);
+ else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return reply(smp);
+}
+
+struct port_info {
+ __be64 mkey;
+ __be64 gid_prefix;
+ __be16 lid;
+ __be16 sm_lid;
+ __be32 cap_mask;
+ __be16 diag_code;
+ __be16 mkey_lease_period;
+ u8 local_port_num;
+ u8 link_width_enabled;
+ u8 link_width_supported;
+ u8 link_width_active;
+ u8 linkspeed_portstate; /* 4 bits, 4 bits */
+ u8 portphysstate_linkdown; /* 4 bits, 4 bits */
+ u8 mkeyprot_resv_lmc; /* 2 bits, 3, 3 */
+ u8 linkspeedactive_enabled; /* 4 bits, 4 bits */
+ u8 neighbormtu_mastersmsl; /* 4 bits, 4 bits */
+ u8 vlcap_inittype; /* 4 bits, 4 bits */
+ u8 vl_high_limit;
+ u8 vl_arb_high_cap;
+ u8 vl_arb_low_cap;
+ u8 inittypereply_mtucap; /* 4 bits, 4 bits */
+ u8 vlstallcnt_hoqlife; /* 3 bits, 5 bits */
+ u8 operationalvl_pei_peo_fpi_fpo; /* 4 bits, 1, 1, 1, 1 */
+ __be16 mkey_violations;
+ __be16 pkey_violations;
+ __be16 qkey_violations;
+ u8 guid_cap;
+ u8 clientrereg_resv_subnetto; /* 1 bit, 2 bits, 5 */
+ u8 resv_resptimevalue; /* 3 bits, 5 bits */
+ u8 localphyerrors_overrunerrors; /* 4 bits, 4 bits */
+ __be16 max_credit_hint;
+ u8 resv;
+ u8 link_roundtrip_latency[3];
+} __attribute__ ((packed));
+
+static int recv_subn_get_portinfo(struct ib_smp *smp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ipath_ibdev *dev;
+ struct port_info *pip = (struct port_info *)smp->data;
+ u16 lid;
+ u8 ibcstat;
+ u8 mtu;
+ int ret;
+
+ if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ dev = to_idev(ibdev);
+
+ /* Clear all fields. Only set the non-zero fields. */
+ memset(smp->data, 0, sizeof(smp->data));
+
+ /* Only return the mkey if the protection field allows it. */
+ if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
+ (dev->mkeyprot_resv_lmc >> 6) == 0)
+ pip->mkey = dev->mkey;
+ pip->gid_prefix = dev->gid_prefix;
+ lid = ipath_layer_get_lid(dev->dd);
+ pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
+ pip->sm_lid = cpu_to_be16(dev->sm_lid);
+ pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
+ /* pip->diag_code; */
+ pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
+ pip->local_port_num = port;
+ pip->link_width_enabled = dev->link_width_enabled;
+ pip->link_width_supported = 3; /* 1x or 4x */
+ pip->link_width_active = 2; /* 4x */
+ pip->linkspeed_portstate = 0x10; /* 2.5Gbps */
+ ibcstat = ipath_layer_get_lastibcstat(dev->dd);
+ pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1;
+ pip->portphysstate_linkdown =
+ (ipath_cvt_physportstate[ibcstat & 0xf] << 4) |
+ (ipath_layer_get_linkdowndefaultstate(dev->dd) ? 1 : 2);
+ pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc;
+ pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */
+ switch (ipath_layer_get_ibmtu(dev->dd)) {
+ case 4096:
+ mtu = IB_MTU_4096;
+ break;
+ case 2048:
+ mtu = IB_MTU_2048;
+ break;
+ case 1024:
+ mtu = IB_MTU_1024;
+ break;
+ case 512:
+ mtu = IB_MTU_512;
+ break;
+ case 256:
+ mtu = IB_MTU_256;
+ break;
+ default: /* oops, something is wrong */
+ mtu = IB_MTU_2048;
+ break;
+ }
+ pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
+ pip->vlcap_inittype = 0x10; /* VLCap = VL0, InitType = 0 */
+ pip->vl_high_limit = dev->vl_high_limit;
+ /* pip->vl_arb_high_cap; // only one VL */
+ /* pip->vl_arb_low_cap; // only one VL */
+ /* InitTypeReply = 0 */
+ pip->inittypereply_mtucap = IB_MTU_4096;
+ /* HCAs ignore VLStallCount and HOQLife */
+ /* pip->vlstallcnt_hoqlife; */
+ pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */
+ pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
+ /* P_KeyViolations are counted by hardware. */
+ pip->pkey_violations =
+ cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) -
+ dev->n_pkey_violations) & 0xFFFF);
+ pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
+ /* Only the hardware GUID is supported for now */
+ pip->guid_cap = 1;
+ pip->clientrereg_resv_subnetto = dev->subnet_timeout;
+ /* 32.768 usec. response time (guessing) */
+ pip->resv_resptimevalue = 3;
+ pip->localphyerrors_overrunerrors =
+ (ipath_layer_get_phyerrthreshold(dev->dd) << 4) |
+ ipath_layer_get_overrunthreshold(dev->dd);
+ /* pip->max_credit_hint; */
+ /* pip->link_roundtrip_latency[3]; */
+
+ ret = reply(smp);
+
+bail:
+ return ret;
+}
+
+static int recv_subn_get_pkeytable(struct ib_smp *smp,
+ struct ib_device *ibdev)
+{
+ u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
+ u16 *p = (u16 *) smp->data;
+ __be16 *q = (__be16 *) smp->data;
+
+ /* 64 blocks of 32 16-bit P_Key entries */
+
+ memset(smp->data, 0, sizeof(smp->data));
+ if (startpx == 0) {
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ unsigned i, n = ipath_layer_get_npkeys(dev->dd);
+
+ ipath_layer_get_pkeys(dev->dd, p);
+
+ for (i = 0; i < n; i++)
+ q[i] = cpu_to_be16(p[i]);
+ } else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return reply(smp);
+}
+
+static int recv_subn_set_guidinfo(struct ib_smp *smp,
+ struct ib_device *ibdev)
+{
+ /* The only GUID we support is the first read-only entry. */
+ return recv_subn_get_guidinfo(smp, ibdev);
+}
+
+/**
+ * recv_subn_set_portinfo - set port information
+ * @smp: the incoming SM packet
+ * @ibdev: the infiniband device
+ * @port: the port on the device
+ *
+ * Set Portinfo (see ch. 14.2.5.6).
+ */
+static int recv_subn_set_portinfo(struct ib_smp *smp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct port_info *pip = (struct port_info *)smp->data;
+ struct ib_event event;
+ struct ipath_ibdev *dev;
+ u32 flags;
+ char clientrereg = 0;
+ u16 lid, smlid;
+ u8 lwe;
+ u8 lse;
+ u8 state;
+ u16 lstate;
+ u32 mtu;
+ int ret;
+
+ if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
+ goto err;
+
+ dev = to_idev(ibdev);
+ event.device = ibdev;
+ event.element.port_num = port;
+
+ dev->mkey = pip->mkey;
+ dev->gid_prefix = pip->gid_prefix;
+ dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
+
+ lid = be16_to_cpu(pip->lid);
+ if (lid != ipath_layer_get_lid(dev->dd)) {
+ /* Must be a valid unicast LID address. */
+ if (lid == 0 || lid >= IPS_MULTICAST_LID_BASE)
+ goto err;
+ ipath_set_sps_lid(dev->dd, lid, pip->mkeyprot_resv_lmc & 7);
+ event.event = IB_EVENT_LID_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ smlid = be16_to_cpu(pip->sm_lid);
+ if (smlid != dev->sm_lid) {
+ /* Must be a valid unicast LID address. */
+ if (smlid == 0 || smlid >= IPS_MULTICAST_LID_BASE)
+ goto err;
+ dev->sm_lid = smlid;
+ event.event = IB_EVENT_SM_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ /* Only 4x supported but allow 1x or 4x to be set (see 14.2.6.6). */
+ lwe = pip->link_width_enabled;
+ if ((lwe >= 4 && lwe <= 8) || (lwe >= 0xC && lwe <= 0xFE))
+ goto err;
+ if (lwe == 0xFF)
+ dev->link_width_enabled = 3; /* 1x or 4x */
+ else if (lwe)
+ dev->link_width_enabled = lwe;
+
+ /* Only 2.5 Gbps is supported. */
+ lse = pip->linkspeedactive_enabled & 0xF;
+ if (lse >= 2 && lse <= 0xE)
+ goto err;
+
+ /* Set link down default state. */
+ switch (pip->portphysstate_linkdown & 0xF) {
+ case 0: /* NOP */
+ break;
+ case 1: /* SLEEP */
+ if (ipath_layer_set_linkdowndefaultstate(dev->dd, 1))
+ goto err;
+ break;
+ case 2: /* POLL */
+ if (ipath_layer_set_linkdowndefaultstate(dev->dd, 0))
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ dev->mkeyprot_resv_lmc = pip->mkeyprot_resv_lmc;
+ dev->vl_high_limit = pip->vl_high_limit;
+
+ switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
+ case IB_MTU_256:
+ mtu = 256;
+ break;
+ case IB_MTU_512:
+ mtu = 512;
+ break;
+ case IB_MTU_1024:
+ mtu = 1024;
+ break;
+ case IB_MTU_2048:
+ mtu = 2048;
+ break;
+ case IB_MTU_4096:
+ mtu = 4096;
+ break;
+ default:
+ /* XXX We have already partially updated our state! */
+ goto err;
+ }
+ ipath_layer_set_mtu(dev->dd, mtu);
+
+ dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;
+
+ /* We only support VL0 */
+ if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
+ goto err;
+
+ if (pip->mkey_violations == 0)
+ dev->mkey_violations = 0;
+
+ /*
+ * The hardware counter can't be reset, so snapshot it and
+ * subtract later.
+ */
+ if (pip->pkey_violations == 0)
+ dev->n_pkey_violations =
+ ipath_layer_get_cr_errpkey(dev->dd);
+
+ if (pip->qkey_violations == 0)
+ dev->qkey_violations = 0;
+
+ if (ipath_layer_set_phyerrthreshold(
+ dev->dd,
+ (pip->localphyerrors_overrunerrors >> 4) & 0xF))
+ goto err;
+
+ if (ipath_layer_set_overrunthreshold(
+ dev->dd,
+ (pip->localphyerrors_overrunerrors & 0xF)))
+ goto err;
+
+ dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
+
+ if (pip->clientrereg_resv_subnetto & 0x80) {
+ clientrereg = 1;
+ event.event = IB_EVENT_LID_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ /*
+ * Do the port state change now that the other link parameters
+ * have been set.
+ * Changing the port physical state only makes sense if the link
+ * is down or is being set to down.
+ */
+ state = pip->linkspeed_portstate & 0xF;
+ flags = ipath_layer_get_flags(dev->dd);
+ lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
+ if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
+ goto err;
+
+ /*
+ * Only state changes of DOWN, ARM, and ACTIVE are valid
+ * and must be in the correct state to take effect (see 7.2.6).
+ */
+ switch (state) {
+ case IB_PORT_NOP:
+ if (lstate == 0)
+ break;
+ /* FALLTHROUGH */
+ case IB_PORT_DOWN:
+ if (lstate == 0)
+ if (ipath_layer_get_linkdowndefaultstate(dev->dd))
+ lstate = IPATH_IB_LINKDOWN_SLEEP;
+ else
+ lstate = IPATH_IB_LINKDOWN;
+ else if (lstate == 1)
+ lstate = IPATH_IB_LINKDOWN_SLEEP;
+ else if (lstate == 2)
+ lstate = IPATH_IB_LINKDOWN;
+ else if (lstate == 3)
+ lstate = IPATH_IB_LINKDOWN_DISABLE;
+ else
+ goto err;
+ ipath_layer_set_linkstate(dev->dd, lstate);
+ if (flags & IPATH_LINKACTIVE) {
+ event.event = IB_EVENT_PORT_ERR;
+ ib_dispatch_event(&event);
+ }
+ break;
+ case IB_PORT_ARMED:
+ if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE)))
+ break;
+ ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKARM);
+ if (flags & IPATH_LINKACTIVE) {
+ event.event = IB_EVENT_PORT_ERR;
+ ib_dispatch_event(&event);
+ }
+ break;
+ case IB_PORT_ACTIVE:
+ if (!(flags & IPATH_LINKARMED))
+ break;
+ ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE);
+ event.event = IB_EVENT_PORT_ACTIVE;
+ ib_dispatch_event(&event);
+ break;
+ default:
+ /* XXX We have already partially updated our state! */
+ goto err;
+ }
+
+ ret = recv_subn_get_portinfo(smp, ibdev, port);
+
+ if (clientrereg)
+ pip->clientrereg_resv_subnetto |= 0x80;
+
+ goto done;
+
+err:
+ smp->status |= IB_SMP_INVALID_FIELD;
+ ret = recv_subn_get_portinfo(smp, ibdev, port);
+
+done:
+ return ret;
+}
+
+static int recv_subn_set_pkeytable(struct ib_smp *smp,
+ struct ib_device *ibdev)
+{
+ u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
+ __be16 *p = (__be16 *) smp->data;
+ u16 *q = (u16 *) smp->data;
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ unsigned i, n = ipath_layer_get_npkeys(dev->dd);
+
+ for (i = 0; i < n; i++)
+ q[i] = be16_to_cpu(p[i]);
+
+ if (startpx != 0 ||
+ ipath_layer_set_pkeys(dev->dd, q) != 0)
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return recv_subn_get_pkeytable(smp, ibdev);
+}
+
+#define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL __constant_htons(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT __constant_htons(0x0011)
+#define IB_PMA_PORT_COUNTERS __constant_htons(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT __constant_htons(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT __constant_htons(0x001E)
+
+struct ib_perf {
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ __be16 status;
+ __be16 unused;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ u8 reserved[40];
+ u8 data[192];
+} __attribute__ ((packed));
+
+struct ib_pma_classportinfo {
+ u8 base_version;
+ u8 class_version;
+ __be16 cap_mask;
+ u8 reserved[3];
+ u8 resp_time_value; /* only lower 5 bits */
+ union ib_gid redirect_gid;
+ __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
+ __be16 redirect_lid;
+ __be16 redirect_pkey;
+ __be32 redirect_qp; /* only lower 24 bits */
+ __be32 redirect_qkey;
+ union ib_gid trap_gid;
+ __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
+ __be16 trap_lid;
+ __be16 trap_pkey;
+ __be32 trap_hl_qp; /* 8, 24 bits respectively */
+ __be32 trap_qkey;
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplescontrol {
+ u8 opcode;
+ u8 port_select;
+ u8 tick;
+ u8 counter_width; /* only lower 3 bits */
+ __be32 counter_mask0_9; /* 2, 10 * 3, bits */
+ __be16 counter_mask10_14; /* 1, 5 * 3, bits */
+ u8 sample_mechanisms;
+ u8 sample_status; /* only lower 2 bits */
+ __be64 option_mask;
+ __be64 vendor_mask;
+ __be32 sample_start;
+ __be32 sample_interval;
+ __be16 tag;
+ __be16 counter_select[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplesresult {
+ __be16 tag;
+ __be16 sample_status; /* only lower 2 bits */
+ __be32 counter[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplesresult_ext {
+ __be16 tag;
+ __be16 sample_status; /* only lower 2 bits */
+ __be32 extended_width; /* only upper 2 bits */
+ __be64 counter[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portcounters {
+ u8 reserved;
+ u8 port_select;
+ __be16 counter_select;
+ __be16 symbol_error_counter;
+ u8 link_error_recovery_counter;
+ u8 link_downed_counter;
+ __be16 port_rcv_errors;
+ __be16 port_rcv_remphys_errors;
+ __be16 port_rcv_switch_relay_errors;
+ __be16 port_xmit_discards;
+ u8 port_xmit_constraint_errors;
+ u8 port_rcv_constraint_errors;
+ u8 reserved1;
+ u8 lli_ebor_errors; /* 4, 4, bits */
+ __be16 reserved2;
+ __be16 vl15_dropped;
+ __be32 port_xmit_data;
+ __be32 port_rcv_data;
+ __be32 port_xmit_packets;
+ __be32 port_rcv_packets;
+} __attribute__ ((packed));
+
+#define IB_PMA_SEL_SYMBOL_ERROR __constant_htons(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY __constant_htons(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED __constant_htons(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS __constant_htons(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS __constant_htons(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS __constant_htons(0x0040)
+#define IB_PMA_SEL_PORT_XMIT_DATA __constant_htons(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA __constant_htons(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS __constant_htons(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS __constant_htons(0x8000)
+
+struct ib_pma_portcounters_ext {
+ u8 reserved;
+ u8 port_select;
+ __be16 counter_select;
+ __be32 reserved1;
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_packets;
+ __be64 port_rcv_packets;
+ __be64 port_unicast_xmit_packets;
+ __be64 port_unicast_rcv_packets;
+ __be64 port_multicast_xmit_packets;
+ __be64 port_multicast_rcv_packets;
+} __attribute__ ((packed));
+
+#define IB_PMA_SELX_PORT_XMIT_DATA __constant_htons(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA __constant_htons(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS __constant_htons(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS __constant_htons(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS __constant_htons(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS __constant_htons(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS __constant_htons(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS __constant_htons(0x0080)
+
+static int recv_pma_get_classportinfo(struct ib_perf *pmp)
+{
+ struct ib_pma_classportinfo *p =
+ (struct ib_pma_classportinfo *)pmp->data;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ if (pmp->attr_mod != 0)
+ pmp->status |= IB_SMP_INVALID_FIELD;
+
+ /* Indicate AllPortSelect is valid (only one port anyway) */
+ p->cap_mask = __constant_cpu_to_be16(1 << 8);
+ p->base_version = 1;
+ p->class_version = 1;
+ /*
+ * Expected response time is 4.096 usec. * 2^18 == 1.073741824
+ * sec.
+ */
+ p->resp_time_value = 18;
+
+ return reply((struct ib_smp *) pmp);
+}
+
+/*
+ * The PortSamplesControl.CounterMasks field is an array of 3-bit fields
+ * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
+ * We support 5 counters which only count the mandatory quantities.
+ */
+#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
+#define COUNTER_MASK0_9 \
+ __constant_cpu_to_be32(COUNTER_MASK(1, 0) | \
+ COUNTER_MASK(1, 1) | \
+ COUNTER_MASK(1, 2) | \
+ COUNTER_MASK(1, 3) | \
+ COUNTER_MASK(1, 4))
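+/*
+ * For illustration: with q == 1, COUNTER_MASK(1, n) places a 1 in the
+ * n'th 3-bit field counting down from bit 29, so COUNTER_MASK0_9
+ * expands to cpu_to_be32(0x09248000).
+ */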
+
+static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplescontrol *p =
+ (struct ib_pma_portsamplescontrol *)pmp->data;
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ unsigned long flags;
+ u8 port_select = p->port_select;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ p->port_select = port_select;
+ if (pmp->attr_mod != 0 ||
+ (port_select != port && port_select != 0xFF))
+ pmp->status |= IB_SMP_INVALID_FIELD;
+ /*
+ * Ticks are 10x the link transfer period, which for 2.5 Gbps is 4
+ * nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample
+ * intervals are counted in ticks. Since we use Linux timers, which
+ * count in jiffies, we can't sample for less than 1000 ticks if HZ
+ * == 1000 (4000 ticks if HZ is 250).
+ */
+ /* XXX This is WRONG. */
+ p->tick = 250; /* 1 usec. */
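+ /*
+ * With the "0 == 4 nsec" encoding above, a tick value of 250
+ * corresponds to (250 + 1) * 4 = 1004 nsec, i.e. roughly 1 usec.
+ */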
+ p->counter_width = 4; /* 32 bit counters */
+ p->counter_mask0_9 = COUNTER_MASK0_9;
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ p->sample_status = dev->pma_sample_status;
+ p->sample_start = cpu_to_be32(dev->pma_sample_start);
+ p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
+ p->tag = cpu_to_be16(dev->pma_tag);
+ p->counter_select[0] = dev->pma_counter_select[0];
+ p->counter_select[1] = dev->pma_counter_select[1];
+ p->counter_select[2] = dev->pma_counter_select[2];
+ p->counter_select[3] = dev->pma_counter_select[3];
+ p->counter_select[4] = dev->pma_counter_select[4];
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplescontrol *p =
+ (struct ib_pma_portsamplescontrol *)pmp->data;
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ unsigned long flags;
+ u32 start;
+ int ret;
+
+ if (pmp->attr_mod != 0 ||
+ (p->port_select != port && p->port_select != 0xFF)) {
+ pmp->status |= IB_SMP_INVALID_FIELD;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ start = be32_to_cpu(p->sample_start);
+ if (start != 0) {
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_DONE) {
+ dev->pma_sample_status =
+ IB_PMA_SAMPLE_STATUS_STARTED;
+ dev->pma_sample_start = start;
+ dev->pma_sample_interval =
+ be32_to_cpu(p->sample_interval);
+ dev->pma_tag = be16_to_cpu(p->tag);
+ if (p->counter_select[0])
+ dev->pma_counter_select[0] =
+ p->counter_select[0];
+ if (p->counter_select[1])
+ dev->pma_counter_select[1] =
+ p->counter_select[1];
+ if (p->counter_select[2])
+ dev->pma_counter_select[2] =
+ p->counter_select[2];
+ if (p->counter_select[3])
+ dev->pma_counter_select[3] =
+ p->counter_select[3];
+ if (p->counter_select[4])
+ dev->pma_counter_select[4] =
+ p->counter_select[4];
+ }
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+ }
+ ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);
+
+bail:
+ return ret;
+}
+
+static u64 get_counter(struct ipath_ibdev *dev, __be16 sel)
+{
+ u64 ret;
+
+ switch (sel) {
+ case IB_PMA_PORT_XMIT_DATA:
+ ret = dev->ipath_sword;
+ break;
+ case IB_PMA_PORT_RCV_DATA:
+ ret = dev->ipath_rword;
+ break;
+ case IB_PMA_PORT_XMIT_PKTS:
+ ret = dev->ipath_spkts;
+ break;
+ case IB_PMA_PORT_RCV_PKTS:
+ ret = dev->ipath_rpkts;
+ break;
+ case IB_PMA_PORT_XMIT_WAIT:
+ ret = dev->ipath_xmit_wait;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
+ struct ib_device *ibdev)
+{
+ struct ib_pma_portsamplesresult *p =
+ (struct ib_pma_portsamplesresult *)pmp->data;
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ int i;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+ p->tag = cpu_to_be16(dev->pma_tag);
+ p->sample_status = cpu_to_be16(dev->pma_sample_status);
+ for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
+ p->counter[i] = cpu_to_be32(
+ get_counter(dev, dev->pma_counter_select[i]));
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+ struct ib_device *ibdev)
+{
+ struct ib_pma_portsamplesresult_ext *p =
+ (struct ib_pma_portsamplesresult_ext *)pmp->data;
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ int i;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+ p->tag = cpu_to_be16(dev->pma_tag);
+ p->sample_status = cpu_to_be16(dev->pma_sample_status);
+ /* 64 bits */
+ p->extended_width = __constant_cpu_to_be32(0x80000000);
+ for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
+ p->counter[i] = cpu_to_be64(
+ get_counter(dev, dev->pma_counter_select[i]));
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int recv_pma_get_portcounters(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ struct ipath_layer_counters cntrs;
+ u8 port_select = p->port_select;
+
+ ipath_layer_get_counters(dev->dd, &cntrs);
+
+ /* Adjust counters for any resets done. */
+ cntrs.symbol_error_counter -= dev->n_symbol_error_counter;
+ cntrs.link_error_recovery_counter -=
+ dev->n_link_error_recovery_counter;
+ cntrs.link_downed_counter -= dev->n_link_downed_counter;
+ cntrs.port_rcv_errors += dev->rcv_errors;
+ cntrs.port_rcv_errors -= dev->n_port_rcv_errors;
+ cntrs.port_rcv_remphys_errors -= dev->n_port_rcv_remphys_errors;
+ cntrs.port_xmit_discards -= dev->n_port_xmit_discards;
+ cntrs.port_xmit_data -= dev->n_port_xmit_data;
+ cntrs.port_rcv_data -= dev->n_port_rcv_data;
+ cntrs.port_xmit_packets -= dev->n_port_xmit_packets;
+ cntrs.port_rcv_packets -= dev->n_port_rcv_packets;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ p->port_select = port_select;
+ if (pmp->attr_mod != 0 ||
+ (port_select != port && port_select != 0xFF))
+ pmp->status |= IB_SMP_INVALID_FIELD;
+
+ if (cntrs.symbol_error_counter > 0xFFFFUL)
+ p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF);
+ else
+ p->symbol_error_counter =
+ cpu_to_be16((u16)cntrs.symbol_error_counter);
+ if (cntrs.link_error_recovery_counter > 0xFFUL)
+ p->link_error_recovery_counter = 0xFF;
+ else
+ p->link_error_recovery_counter =
+ (u8)cntrs.link_error_recovery_counter;
+ if (cntrs.link_downed_counter > 0xFFUL)
+ p->link_downed_counter = 0xFF;
+ else
+ p->link_downed_counter = (u8)cntrs.link_downed_counter;
+ if (cntrs.port_rcv_errors > 0xFFFFUL)
+ p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_errors =
+ cpu_to_be16((u16) cntrs.port_rcv_errors);
+ if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
+ p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_remphys_errors =
+ cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
+ if (cntrs.port_xmit_discards > 0xFFFFUL)
+ p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF);
+ else
+ p->port_xmit_discards =
+ cpu_to_be16((u16)cntrs.port_xmit_discards);
+ if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
+ p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
+ if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
+ p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
+ if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
+ p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_xmit_packets =
+ cpu_to_be32((u32)cntrs.port_xmit_packets);
+ if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
+ p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_rcv_packets =
+ cpu_to_be32((u32) cntrs.port_rcv_packets);
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters_ext *p =
+ (struct ib_pma_portcounters_ext *)pmp->data;
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ u64 swords, rwords, spkts, rpkts, xwait;
+ u8 port_select = p->port_select;
+
+ ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
+ &rpkts, &xwait);
+
+ /* Adjust counters for any resets done. */
+ swords -= dev->n_port_xmit_data;
+ rwords -= dev->n_port_rcv_data;
+ spkts -= dev->n_port_xmit_packets;
+ rpkts -= dev->n_port_rcv_packets;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ p->port_select = port_select;
+ if (pmp->attr_mod != 0 ||
+ (port_select != port && port_select != 0xFF))
+ pmp->status |= IB_SMP_INVALID_FIELD;
+
+ p->port_xmit_data = cpu_to_be64(swords);
+ p->port_rcv_data = cpu_to_be64(rwords);
+ p->port_xmit_packets = cpu_to_be64(spkts);
+ p->port_rcv_packets = cpu_to_be64(rpkts);
+ p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
+ p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
+ p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
+ p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int recv_pma_set_portcounters(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ struct ipath_layer_counters cntrs;
+
+ /*
+ * Since the HW doesn't support clearing counters, we save the
+ * current count and subtract it from future responses.
+ */
+ ipath_layer_get_counters(dev->dd, &cntrs);
+
+ if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
+ dev->n_symbol_error_counter = cntrs.symbol_error_counter;
+
+ if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
+ dev->n_link_error_recovery_counter =
+ cntrs.link_error_recovery_counter;
+
+ if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
+ dev->n_link_downed_counter = cntrs.link_downed_counter;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
+ dev->n_port_rcv_errors =
+ cntrs.port_rcv_errors + dev->rcv_errors;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
+ dev->n_port_rcv_remphys_errors =
+ cntrs.port_rcv_remphys_errors;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
+ dev->n_port_xmit_discards = cntrs.port_xmit_discards;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
+ dev->n_port_xmit_data = cntrs.port_xmit_data;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
+ dev->n_port_rcv_data = cntrs.port_rcv_data;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
+ dev->n_port_xmit_packets = cntrs.port_xmit_packets;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
+ dev->n_port_rcv_packets = cntrs.port_rcv_packets;
+
+ return recv_pma_get_portcounters(pmp, ibdev, port);
+}
+
+static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ u64 swords, rwords, spkts, rpkts, xwait;
+
+ ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
+ &rpkts, &xwait);
+
+ if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
+ dev->n_port_xmit_data = swords;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
+ dev->n_port_rcv_data = rwords;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
+ dev->n_port_xmit_packets = spkts;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
+ dev->n_port_rcv_packets = rpkts;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
+ dev->n_unicast_xmit = 0;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
+ dev->n_unicast_rcv = 0;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
+ dev->n_multicast_xmit = 0;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
+ dev->n_multicast_rcv = 0;
+
+ return recv_pma_get_portcounters_ext(pmp, ibdev, port);
+}
+
+static int process_subn(struct ib_device *ibdev, int mad_flags,
+ u8 port_num, struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_smp *smp = (struct ib_smp *)out_mad;
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ int ret;
+
+ *out_mad = *in_mad;
+ if (smp->class_version != 1) {
+ smp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ /* Is the mkey in the process of expiring? */
+ if (dev->mkey_lease_timeout && jiffies >= dev->mkey_lease_timeout) {
+ /* Clear timeout and mkey protection field. */
+ dev->mkey_lease_timeout = 0;
+ dev->mkeyprot_resv_lmc &= 0x3F;
+ }
+
+ /*
+ * M_Key checking depends on
+ * Portinfo:M_Key_protect_bits
+ */
+ if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
+ dev->mkey != smp->mkey &&
+ (smp->method == IB_MGMT_METHOD_SET ||
+ (smp->method == IB_MGMT_METHOD_GET &&
+ (dev->mkeyprot_resv_lmc >> 7) != 0))) {
+ if (dev->mkey_violations != 0xFFFF)
+ ++dev->mkey_violations;
+ if (dev->mkey_lease_timeout ||
+ dev->mkey_lease_period == 0) {
+ ret = IB_MAD_RESULT_SUCCESS |
+ IB_MAD_RESULT_CONSUMED;
+ goto bail;
+ }
+ dev->mkey_lease_timeout = jiffies +
+ dev->mkey_lease_period * HZ;
+ /* Future: Generate a trap notice. */
+ ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ goto bail;
+ } else if (dev->mkey_lease_timeout)
+ dev->mkey_lease_timeout = 0;
+
+ switch (smp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (smp->attr_id) {
+ case IB_SMP_ATTR_NODE_DESC:
+ ret = recv_subn_get_nodedescription(smp, ibdev);
+ goto bail;
+ case IB_SMP_ATTR_NODE_INFO:
+ ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
+ goto bail;
+ case IB_SMP_ATTR_GUID_INFO:
+ ret = recv_subn_get_guidinfo(smp, ibdev);
+ goto bail;
+ case IB_SMP_ATTR_PORT_INFO:
+ ret = recv_subn_get_portinfo(smp, ibdev, port_num);
+ goto bail;
+ case IB_SMP_ATTR_PKEY_TABLE:
+ ret = recv_subn_get_pkeytable(smp, ibdev);
+ goto bail;
+ case IB_SMP_ATTR_SM_INFO:
+ if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
+ ret = IB_MAD_RESULT_SUCCESS |
+ IB_MAD_RESULT_CONSUMED;
+ goto bail;
+ }
+ if (dev->port_cap_flags & IB_PORT_SM) {
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+ }
+ /* FALLTHROUGH */
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_SET:
+ switch (smp->attr_id) {
+ case IB_SMP_ATTR_GUID_INFO:
+ ret = recv_subn_set_guidinfo(smp, ibdev);
+ goto bail;
+ case IB_SMP_ATTR_PORT_INFO:
+ ret = recv_subn_set_portinfo(smp, ibdev, port_num);
+ goto bail;
+ case IB_SMP_ATTR_PKEY_TABLE:
+ ret = recv_subn_set_pkeytable(smp, ibdev);
+ goto bail;
+ case IB_SMP_ATTR_SM_INFO:
+ if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
+ ret = IB_MAD_RESULT_SUCCESS |
+ IB_MAD_RESULT_CONSUMED;
+ goto bail;
+ }
+ if (dev->port_cap_flags & IB_PORT_SM) {
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+ }
+ /* FALLTHROUGH */
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_FAILURE;
+ goto bail;
+ default:
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ ret = reply(smp);
+ }
+
+bail:
+ return ret;
+}
+
+static int process_perf(struct ib_device *ibdev, u8 port_num,
+ struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_perf *pmp = (struct ib_perf *)out_mad;
+ int ret;
+
+ *out_mad = *in_mad;
+ if (pmp->class_version != 1) {
+ pmp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ switch (pmp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (pmp->attr_id) {
+ case IB_PMA_CLASS_PORT_INFO:
+ ret = recv_pma_get_classportinfo(pmp);
+ goto bail;
+ case IB_PMA_PORT_SAMPLES_CONTROL:
+ ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
+ port_num);
+ goto bail;
+ case IB_PMA_PORT_SAMPLES_RESULT:
+ ret = recv_pma_get_portsamplesresult(pmp, ibdev);
+ goto bail;
+ case IB_PMA_PORT_SAMPLES_RESULT_EXT:
+ ret = recv_pma_get_portsamplesresult_ext(pmp,
+ ibdev);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS:
+ ret = recv_pma_get_portcounters(pmp, ibdev,
+ port_num);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ ret = recv_pma_get_portcounters_ext(pmp, ibdev,
+ port_num);
+ goto bail;
+ default:
+ pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_SET:
+ switch (pmp->attr_id) {
+ case IB_PMA_PORT_SAMPLES_CONTROL:
+ ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
+ port_num);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS:
+ ret = recv_pma_set_portcounters(pmp, ibdev,
+ port_num);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ ret = recv_pma_set_portcounters_ext(pmp, ibdev,
+ port_num);
+ goto bail;
+ default:
+ pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_FAILURE;
+ goto bail;
+ default:
+ pmp->status |= IB_SMP_UNSUP_METHOD;
+ ret = reply((struct ib_smp *) pmp);
+ }
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_process_mad - process an incoming MAD packet
+ * @ibdev: the infiniband device this packet came in on
+ * @mad_flags: MAD flags
+ * @port_num: the port number this packet came in on
+ * @in_wc: the work completion entry for this packet
+ * @in_grh: the global route header for this packet
+ * @in_mad: the incoming MAD
+ * @out_mad: any outgoing MAD reply
+ *
+ * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
+ * interested in processing.
+ *
+ * Note that the verbs framework has already done the MAD sanity checks,
+ * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
+ * MADs.
+ *
+ * This is called by the ib_mad module.
+ */
+int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ int ret;
+
+ /*
+ * Snapshot current HW counters to "clear" them.
+ * This should be done when the driver is loaded except that for
+ * some reason we get a zillion errors when bringing up the link.
+ */
+ if (dev->rcv_errors == 0) {
+ struct ipath_layer_counters cntrs;
+
+ ipath_layer_get_counters(to_idev(ibdev)->dd, &cntrs);
+ dev->rcv_errors++;
+ dev->n_symbol_error_counter = cntrs.symbol_error_counter;
+ dev->n_link_error_recovery_counter =
+ cntrs.link_error_recovery_counter;
+ dev->n_link_downed_counter = cntrs.link_downed_counter;
+ dev->n_port_rcv_errors = cntrs.port_rcv_errors + 1;
+ dev->n_port_rcv_remphys_errors =
+ cntrs.port_rcv_remphys_errors;
+ dev->n_port_xmit_discards = cntrs.port_xmit_discards;
+ dev->n_port_xmit_data = cntrs.port_xmit_data;
+ dev->n_port_rcv_data = cntrs.port_rcv_data;
+ dev->n_port_xmit_packets = cntrs.port_xmit_packets;
+ dev->n_port_rcv_packets = cntrs.port_rcv_packets;
+ }
+ switch (in_mad->mad_hdr.mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ ret = process_subn(ibdev, mad_flags, port_num,
+ in_mad, out_mad);
+ goto bail;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ ret = process_perf(ibdev, port_num, in_mad, out_mad);
+ goto bail;
+ default:
+ ret = IB_MAD_RESULT_SUCCESS;
+ }
+
+bail:
+ return ret;
+}
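As the kerneldoc above notes, the return value tells the ib_mad layer what to do next. The sketch below shows one plausible way a dispatcher could interpret those flags; the MAD_RESULT_* macros are local stand-ins mirroring the IB_MAD_RESULT_* names used above, and the dispatch policy is a simplification, not the ib_mad module's actual logic.

#include <stdio.h>

/* Local stand-ins for the IB_MAD_RESULT_* flags. */
#define MAD_RESULT_SUCCESS	(1 << 0)	/* MAD was handled */
#define MAD_RESULT_REPLY	(1 << 1)	/* out_mad holds a reply to send */
#define MAD_RESULT_CONSUMED	(1 << 2)	/* do not pass to other consumers */

static void dispatch(int ret)
{
	if (!(ret & MAD_RESULT_SUCCESS)) {
		printf("driver did not handle it; keep looking for a consumer\n");
		return;
	}
	if (ret & MAD_RESULT_REPLY)
		printf("send the reply built in out_mad\n");
	if (ret & MAD_RESULT_CONSUMED)
		printf("stop processing this MAD\n");
}

int main(void)
{
	dispatch(MAD_RESULT_SUCCESS | MAD_RESULT_CONSUMED);	/* e.g. an M_Key drop */
	dispatch(0);						/* e.g. a GET_RESP */
	return 0;
}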
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
new file mode 100644
index 0000000000000..69ffec66d45da
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_pack.h>
+#include <rdma/ib_smi.h>
+
+#include "ipath_verbs.h"
+
+/**
+ * ipath_get_dma_mr - get a DMA memory region
+ * @pd: protection domain for this memory region
+ * @acc: access flags
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct ipath_mr *mr;
+ struct ib_mr *ret;
+
+ mr = kzalloc(sizeof *mr, GFP_KERNEL);
+ if (!mr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ mr->mr.access_flags = acc;
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+static struct ipath_mr *alloc_mr(int count,
+ struct ipath_lkey_table *lk_table)
+{
+ struct ipath_mr *mr;
+ int m, i = 0;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
+ mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
+ if (!mr)
+ goto done;
+
+ /* Allocate first level page tables. */
+ for (; i < m; i++) {
+ mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
+ if (!mr->mr.map[i])
+ goto bail;
+ }
+ mr->mr.mapsz = m;
+
+ /*
+ * ib_reg_phys_mr() will initialize mr->ibmr except for
+ * lkey and rkey.
+ */
+ if (!ipath_alloc_lkey(lk_table, &mr->mr))
+ goto bail;
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;
+
+ goto done;
+
+bail:
+ while (i) {
+ i--;
+ kfree(mr->mr.map[i]);
+ }
+ kfree(mr);
+ mr = NULL;
+
+done:
+ return mr;
+}
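alloc_mr() sizes the map as the ceiling of count / IPATH_SEGSZ first-level tables, and the registration paths below walk it with the same (m, n) pattern: n indexes within a table and m advances when n wraps. A standalone sketch of that indexing, with a made-up segment size:

#include <stdio.h>

#define SEGSZ 8	/* stand-in for IPATH_SEGSZ; the real value is chip-defined */

int main(void)
{
	int count = 19;
	/* Number of first-level tables: ceiling of count / SEGSZ. */
	int tables = (count + SEGSZ - 1) / SEGSZ;
	int m = 0, n = 0, i;

	printf("%d segments need %d first-level tables\n", count, tables);

	for (i = 0; i < count; i++) {
		printf("segment %2d -> map[%d]->segs[%d]\n", i, m, n);
		if (++n == SEGSZ) {	/* advance to the next table */
			m++;
			n = 0;
		}
	}
	return 0;
}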
+
+/**
+ * ipath_reg_phys_mr - register a physical memory region
+ * @pd: protection domain for this memory region
+ * @buffer_list: pointer to the list of physical buffers to register
+ * @num_phys_buf: the number of physical buffers to register
+ * @acc: access flags for this memory region
+ * @iova_start: the starting address passed over IB which maps to this MR
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start)
+{
+ struct ipath_mr *mr;
+ int n, m, i;
+ struct ib_mr *ret;
+
+ mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
+ if (mr == NULL) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ mr->mr.user_base = *iova_start;
+ mr->mr.iova = *iova_start;
+ mr->mr.length = 0;
+ mr->mr.offset = 0;
+ mr->mr.access_flags = acc;
+ mr->mr.max_segs = num_phys_buf;
+
+ m = 0;
+ n = 0;
+ for (i = 0; i < num_phys_buf; i++) {
+ mr->mr.map[m]->segs[n].vaddr =
+ phys_to_virt(buffer_list[i].addr);
+ mr->mr.map[m]->segs[n].length = buffer_list[i].size;
+ mr->mr.length += buffer_list[i].size;
+ n++;
+ if (n == IPATH_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_reg_user_mr - register a userspace memory region
+ * @pd: protection domain for this memory region
+ * @region: the user memory region
+ * @mr_access_flags: access flags for this memory region
+ * @udata: unused by the InfiniPath driver
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
+ int mr_access_flags, struct ib_udata *udata)
+{
+ struct ipath_mr *mr;
+ struct ib_umem_chunk *chunk;
+ int n, m, i;
+ struct ib_mr *ret;
+
+ n = 0;
+ list_for_each_entry(chunk, &region->chunk_list, list)
+ n += chunk->nents;
+
+ mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
+ if (!mr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ mr->mr.user_base = region->user_base;
+ mr->mr.iova = region->virt_base;
+ mr->mr.length = region->length;
+ mr->mr.offset = region->offset;
+ mr->mr.access_flags = mr_access_flags;
+ mr->mr.max_segs = n;
+
+ m = 0;
+ n = 0;
+ list_for_each_entry(chunk, &region->chunk_list, list) {
+ for (i = 0; i < chunk->nmap; i++) {
+ mr->mr.map[m]->segs[n].vaddr =
+ page_address(chunk->page_list[i].page);
+ mr->mr.map[m]->segs[n].length = region->page_size;
+ n++;
+ if (n == IPATH_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_dereg_mr - unregister and free a memory region
+ * @ibmr: the memory region to free
+ *
+ * Returns 0 on success.
+ *
+ * Note that this is called to free MRs created by ipath_get_dma_mr()
+ * or ipath_reg_user_mr().
+ */
+int ipath_dereg_mr(struct ib_mr *ibmr)
+{
+ struct ipath_mr *mr = to_imr(ibmr);
+ int i;
+
+ ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
+ i = mr->mr.mapsz;
+ while (i) {
+ i--;
+ kfree(mr->mr.map[i]);
+ }
+ kfree(mr);
+ return 0;
+}
+
+/**
+ * ipath_alloc_fmr - allocate a fast memory region
+ * @pd: the protection domain for this memory region
+ * @mr_access_flags: access flags for this memory region
+ * @fmr_attr: fast memory region attributes
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr)
+{
+ struct ipath_fmr *fmr;
+ int m, i = 0;
+ struct ib_fmr *ret;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
+ fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
+ if (!fmr)
+ goto bail;
+
+ /* Allocate first level page tables. */
+ for (; i < m; i++) {
+ fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
+ GFP_KERNEL);
+ if (!fmr->mr.map[i])
+ goto bail;
+ }
+ fmr->mr.mapsz = m;
+
+ /*
+ * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
+ * rkey.
+ */
+ if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
+ goto bail;
+ fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
+ /*
+ * Resources are allocated but no valid mapping (RKEY can't be
+ * used).
+ */
+ fmr->mr.user_base = 0;
+ fmr->mr.iova = 0;
+ fmr->mr.length = 0;
+ fmr->mr.offset = 0;
+ fmr->mr.access_flags = mr_access_flags;
+ fmr->mr.max_segs = fmr_attr->max_pages;
+ fmr->page_shift = fmr_attr->page_shift;
+
+ ret = &fmr->ibfmr;
+ goto done;
+
+bail:
+ while (i)
+ kfree(fmr->mr.map[--i]);
+ kfree(fmr);
+ ret = ERR_PTR(-ENOMEM);
+
+done:
+ return ret;
+}
+
+/**
+ * ipath_map_phys_fmr - set up a fast memory region
+ * @ibfmr: the fast memory region to set up
+ * @page_list: the list of pages to associate with the fast memory region
+ * @list_len: the number of pages to associate with the fast memory region
+ * @iova: the virtual address of the start of the fast memory region
+ *
+ * This may be called from interrupt context.
+ */
+int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova)
+{
+ struct ipath_fmr *fmr = to_ifmr(ibfmr);
+ struct ipath_lkey_table *rkt;
+ unsigned long flags;
+ int m, n, i;
+ u32 ps;
+ int ret;
+
+ if (list_len > fmr->mr.max_segs) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ rkt = &to_idev(ibfmr->device)->lk_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = iova;
+ fmr->mr.iova = iova;
+ ps = 1 << fmr->page_shift;
+ fmr->mr.length = list_len * ps;
+ m = 0;
+ n = 0;
+ for (i = 0; i < list_len; i++) {
+ fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]);
+ fmr->mr.map[m]->segs[n].length = ps;
+ if (++n == IPATH_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = 0;
+
+bail:
+ return ret;
+}
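For reference, the page arithmetic in ipath_map_phys_fmr(): the page size is 1 << page_shift, and the mapped length is list_len pages of that size. A tiny worked example with an assumed page_shift of 12:

#include <stdio.h>

int main(void)
{
	unsigned page_shift = 12;		/* hypothetical: 4 KB pages */
	unsigned ps = 1U << page_shift;		/* 4096 */
	int list_len = 16;			/* hypothetical page count */

	printf("page size %u bytes, region length %u bytes\n",
	       ps, list_len * ps);		/* 4096, 65536 */
	return 0;
}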
+
+/**
+ * ipath_unmap_fmr - unmap fast memory regions
+ * @fmr_list: the list of fast memory regions to unmap
+ *
+ * Returns 0 on success.
+ */
+int ipath_unmap_fmr(struct list_head *fmr_list)
+{
+ struct ipath_fmr *fmr;
+ struct ipath_lkey_table *rkt;
+ unsigned long flags;
+
+ list_for_each_entry(fmr, fmr_list, ibfmr.list) {
+ rkt = &to_idev(fmr->ibfmr.device)->lk_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = 0;
+ fmr->mr.iova = 0;
+ fmr->mr.length = 0;
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ }
+ return 0;
+}
+
+/**
+ * ipath_dealloc_fmr - deallocate a fast memory region
+ * @ibfmr: the fast memory region to deallocate
+ *
+ * Returns 0 on success.
+ */
+int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
+{
+ struct ipath_fmr *fmr = to_ifmr(ibfmr);
+ int i;
+
+ ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
+ i = fmr->mr.mapsz;
+ while (i)
+ kfree(fmr->mr.map[--i]);
+ kfree(fmr);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_pe800.c
new file mode 100644
index 0000000000000..e693a7a826670
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_pe800.c
@@ -0,0 +1,1247 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*
+ * This file contains all of the code that is specific to the
+ * InfiniPath PE-800 chip.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+
+#include "ipath_kernel.h"
+#include "ipath_registers.h"
+
+/*
+ * This file contains all the chip-specific register information and
+ * access functions for the PathScale PE800, the PCI-Express chip.
+ *
+ * This lists the InfiniPath PE800 registers, in the actual chip layout.
+ * This structure should never be directly accessed.
+ */
+struct _infinipath_do_not_use_kernel_regs {
+ unsigned long long Revision;
+ unsigned long long Control;
+ unsigned long long PageAlign;
+ unsigned long long PortCnt;
+ unsigned long long DebugPortSelect;
+ unsigned long long Reserved0;
+ unsigned long long SendRegBase;
+ unsigned long long UserRegBase;
+ unsigned long long CounterRegBase;
+ unsigned long long Scratch;
+ unsigned long long Reserved1;
+ unsigned long long Reserved2;
+ unsigned long long IntBlocked;
+ unsigned long long IntMask;
+ unsigned long long IntStatus;
+ unsigned long long IntClear;
+ unsigned long long ErrorMask;
+ unsigned long long ErrorStatus;
+ unsigned long long ErrorClear;
+ unsigned long long HwErrMask;
+ unsigned long long HwErrStatus;
+ unsigned long long HwErrClear;
+ unsigned long long HwDiagCtrl;
+ unsigned long long MDIO;
+ unsigned long long IBCStatus;
+ unsigned long long IBCCtrl;
+ unsigned long long ExtStatus;
+ unsigned long long ExtCtrl;
+ unsigned long long GPIOOut;
+ unsigned long long GPIOMask;
+ unsigned long long GPIOStatus;
+ unsigned long long GPIOClear;
+ unsigned long long RcvCtrl;
+ unsigned long long RcvBTHQP;
+ unsigned long long RcvHdrSize;
+ unsigned long long RcvHdrCnt;
+ unsigned long long RcvHdrEntSize;
+ unsigned long long RcvTIDBase;
+ unsigned long long RcvTIDCnt;
+ unsigned long long RcvEgrBase;
+ unsigned long long RcvEgrCnt;
+ unsigned long long RcvBufBase;
+ unsigned long long RcvBufSize;
+ unsigned long long RxIntMemBase;
+ unsigned long long RxIntMemSize;
+ unsigned long long RcvPartitionKey;
+ unsigned long long Reserved3;
+ unsigned long long RcvPktLEDCnt;
+ unsigned long long Reserved4[8];
+ unsigned long long SendCtrl;
+ unsigned long long SendPIOBufBase;
+ unsigned long long SendPIOSize;
+ unsigned long long SendPIOBufCnt;
+ unsigned long long SendPIOAvailAddr;
+ unsigned long long TxIntMemBase;
+ unsigned long long TxIntMemSize;
+ unsigned long long Reserved5;
+ unsigned long long PCIeRBufTestReg0;
+ unsigned long long PCIeRBufTestReg1;
+ unsigned long long Reserved51[6];
+ unsigned long long SendBufferError;
+ unsigned long long SendBufferErrorCONT1;
+ unsigned long long Reserved6SBE[6];
+ unsigned long long RcvHdrAddr0;
+ unsigned long long RcvHdrAddr1;
+ unsigned long long RcvHdrAddr2;
+ unsigned long long RcvHdrAddr3;
+ unsigned long long RcvHdrAddr4;
+ unsigned long long Reserved7RHA[11];
+ unsigned long long RcvHdrTailAddr0;
+ unsigned long long RcvHdrTailAddr1;
+ unsigned long long RcvHdrTailAddr2;
+ unsigned long long RcvHdrTailAddr3;
+ unsigned long long RcvHdrTailAddr4;
+ unsigned long long Reserved8RHTA[11];
+ unsigned long long Reserved9SW[8];
+ unsigned long long SerdesConfig0;
+ unsigned long long SerdesConfig1;
+ unsigned long long SerdesStatus;
+ unsigned long long XGXSConfig;
+ unsigned long long IBPLLCfg;
+ unsigned long long Reserved10SW2[3];
+ unsigned long long PCIEQ0SerdesConfig0;
+ unsigned long long PCIEQ0SerdesConfig1;
+ unsigned long long PCIEQ0SerdesStatus;
+ unsigned long long Reserved11;
+ unsigned long long PCIEQ1SerdesConfig0;
+ unsigned long long PCIEQ1SerdesConfig1;
+ unsigned long long PCIEQ1SerdesStatus;
+ unsigned long long Reserved12;
+};
+
+#define IPATH_KREG_OFFSET(field) (offsetof(struct \
+ _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
+#define IPATH_CREG_OFFSET(field) (offsetof( \
+ struct infinipath_counters, field) / sizeof(u64))
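The two macros above convert a field's byte offset in the layout struct into an index of 64-bit registers, which is how the ipath_kregs and ipath_cregs tables below are filled in. The same trick in a self-contained form, with an invented register block:

#include <stdio.h>
#include <stddef.h>

struct fake_regs {
	unsigned long long revision;	/* index 0 */
	unsigned long long control;	/* index 1 */
	unsigned long long scratch;	/* index 2 */
};

#define FAKE_KREG_OFFSET(field) \
	(offsetof(struct fake_regs, field) / sizeof(unsigned long long))

int main(void)
{
	/* Prints 0, 1 and 2: byte offsets 0, 8, 16 divided by 8. */
	printf("%zu %zu %zu\n",
	       FAKE_KREG_OFFSET(revision),
	       FAKE_KREG_OFFSET(control),
	       FAKE_KREG_OFFSET(scratch));
	return 0;
}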
+
+static const struct ipath_kregs ipath_pe_kregs = {
+ .kr_control = IPATH_KREG_OFFSET(Control),
+ .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
+ .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
+ .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
+ .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
+ .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
+ .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
+ .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
+ .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
+ .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
+ .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
+ .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
+ .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
+ .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
+ .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
+ .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
+ .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
+ .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
+ .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
+ .kr_intclear = IPATH_KREG_OFFSET(IntClear),
+ .kr_intmask = IPATH_KREG_OFFSET(IntMask),
+ .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
+ .kr_mdio = IPATH_KREG_OFFSET(MDIO),
+ .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
+ .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
+ .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
+ .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
+ .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
+ .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
+ .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
+ .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
+ .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
+ .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
+ .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
+ .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
+ .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
+ .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
+ .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
+ .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
+ .kr_revision = IPATH_KREG_OFFSET(Revision),
+ .kr_scratch = IPATH_KREG_OFFSET(Scratch),
+ .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
+ .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
+ .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
+ .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
+ .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
+ .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
+ .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
+ .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
+ .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
+ .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
+ .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
+ .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
+ .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
+ .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
+ .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),
+
+ /*
+ * These should not be used directly via ipath_read_kreg64();
+ * use them with ipath_read_kreg64_port().
+ */
+ .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
+ .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
+
+ /* This group is PE-800-specific and used only in this file */
+ /* The rcvpktled register controls one of the debug port signals, so
+ * a packet activity LED can be connected to it. */
+ .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
+ .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
+ .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
+ .kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0),
+ .kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1),
+ .kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus),
+ .kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0),
+ .kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1),
+ .kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus)
+};
+
+static const struct ipath_cregs ipath_pe_cregs = {
+ .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
+ .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
+ .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
+ .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
+ .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
+ .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
+ .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
+ .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
+ .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
+ .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
+ .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
+ .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
+ .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
+ .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
+ .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
+ .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
+ .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
+ .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
+ .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
+ .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
+ .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
+ .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
+ .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
+ .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
+ .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
+ .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
+ .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
+ .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
+ .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
+ .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
+ .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
+ .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
+ .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
+};
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define INFINIPATH_I_RCVURG_MASK 0x1F
+#define INFINIPATH_I_RCVAVAIL_MASK 0x1F
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
+#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
+#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
+#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
+#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
+#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
+#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
+#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
+#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
+#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
+#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
+#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
+
+/* kr_extstatus bits */
+#define INFINIPATH_EXTS_FREQSEL 0x2
+#define INFINIPATH_EXTS_SERDESSEL 0x4
+#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
+#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000
+
+#define _IPATH_GPIO_SDA_NUM 1
+#define _IPATH_GPIO_SCL_NUM 0
+
+#define IPATH_GPIO_SDA (1ULL << \
+ (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
+#define IPATH_GPIO_SCL (1ULL << \
+ (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
+
+/**
+ * ipath_pe_handle_hwerrors - display hardware errors.
+ * @dd: the infinipath device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now,
+ * we'll print them and continue. We reuse the same message buffer as
+ * ipath_handle_errors() to avoid excessive stack usage.
+ */
+void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
+ size_t msgl)
+{
+ ipath_err_t hwerrs;
+ u32 bits, ctrl;
+ int isfatal = 0;
+ char bitsmsg[64];
+
+ hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
+ if (!hwerrs) {
+ /*
+ * Returning early here is better than printing confusing
+ * messages. This seems to be related to clearing the CRC
+ * error, or the PLL error, during init.
+ */
+ ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
+ return;
+ } else if (hwerrs == ~0ULL) {
+ ipath_dev_err(dd, "Read of hardware error status failed "
+ "(all bits set); ignoring\n");
+ return;
+ }
+ ipath_stats.sps_hwerrs++;
+
+ /* Always clear the error status register, except MEMBISTFAIL,
+ * regardless of whether we continue or stop using the chip.
+ * We want that set so we know it failed, even across driver reload.
+ * We'll still ignore it in the hwerrmask. We do this partly for
+ * diagnostics, but also for support */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+ hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
+
+ hwerrs &= dd->ipath_hwerrmask;
+
+ /*
+ * make sure we get this much out, unless told to be quiet,
+ * or it's occurred within the last 5 seconds
+ */
+ if ((hwerrs & ~dd->ipath_lasthwerror) ||
+ (ipath_debug & __IPATH_VERBDBG))
+ dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
+ "(cleared)\n", (unsigned long long) hwerrs);
+ dd->ipath_lasthwerror |= hwerrs;
+
+ if (hwerrs & ~infinipath_hwe_bitsextant)
+ ipath_dev_err(dd, "hwerror interrupt with unknown errors "
+ "%llx set\n", (unsigned long long)
+ (hwerrs & ~infinipath_hwe_bitsextant));
+
+ ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
+ if (ctrl & INFINIPATH_C_FREEZEMODE) {
+ if (hwerrs) {
+ /*
+ * If any bits that we aren't ignoring are set, only make the
+ * complaint once, in case the error is stuck or recurring
+ * and we get here multiple times.
+ */
+ if (dd->ipath_flags & IPATH_INITTED) {
+ ipath_dev_err(dd, "Fatal Error (freeze "
+ "mode), no longer usable\n");
+ isfatal = 1;
+ }
+ /*
+ * Mark as having had an error for driver, and also
+ * for /sys and status word mapped to user programs.
+ * This marks unit as not usable, until reset
+ */
+ *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+ *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
+ dd->ipath_flags &= ~IPATH_INITTED;
+ } else {
+ ipath_dbg("Clearing freezemode on ignored hardware "
+ "error\n");
+ ctrl &= ~INFINIPATH_C_FREEZEMODE;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
+ ctrl);
+ }
+ }
+
+ *msg = '\0';
+
+ if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
+ strlcat(msg, "[Memory BIST test failed, PE-800 unusable]",
+ msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
+ dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+ dd->ipath_hwerrmask);
+ }
+ if (hwerrs & (INFINIPATH_HWE_RXEMEMPARITYERR_MASK
+ << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)) {
+ bits = (u32) ((hwerrs >>
+ INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) &
+ INFINIPATH_HWE_RXEMEMPARITYERR_MASK);
+ snprintf(bitsmsg, sizeof bitsmsg, "[RXE Parity Errs %x] ",
+ bits);
+ strlcat(msg, bitsmsg, msgl);
+ }
+ if (hwerrs & (INFINIPATH_HWE_TXEMEMPARITYERR_MASK
+ << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
+ bits = (u32) ((hwerrs >>
+ INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) &
+ INFINIPATH_HWE_TXEMEMPARITYERR_MASK);
+ snprintf(bitsmsg, sizeof bitsmsg, "[TXE Parity Errs %x] ",
+ bits);
+ strlcat(msg, bitsmsg, msgl);
+ }
+ if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
+ << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
+ bits = (u32) ((hwerrs >>
+ INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
+ INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
+ snprintf(bitsmsg, sizeof bitsmsg,
+ "[PCIe Mem Parity Errs %x] ", bits);
+ strlcat(msg, bitsmsg, msgl);
+ }
+ if (hwerrs & INFINIPATH_HWE_IBCBUSTOSPCPARITYERR)
+ strlcat(msg, "[IB2IPATH Parity]", msgl);
+ if (hwerrs & INFINIPATH_HWE_IBCBUSFRSPCPARITYERR)
+ strlcat(msg, "[IPATH2IB Parity]", msgl);
+
+#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
+ INFINIPATH_HWE_COREPLL_RFSLIP )
+
+ if (hwerrs & _IPATH_PLL_FAIL) {
+ snprintf(bitsmsg, sizeof bitsmsg,
+ "[PLL failed (%llx), PE-800 unusable]",
+ (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
+ strlcat(msg, bitsmsg, msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+ dd->ipath_hwerrmask);
+ }
+
+ if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
+ /*
+ * If it occurs, it is left masked since the external
+ * interface is unused.
+ */
+ dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+ dd->ipath_hwerrmask);
+ }
+
+ if (hwerrs & INFINIPATH_HWE_PCIEPOISONEDTLP)
+ strlcat(msg, "[PCIe Poisoned TLP]", msgl);
+ if (hwerrs & INFINIPATH_HWE_PCIECPLTIMEOUT)
+ strlcat(msg, "[PCIe completion timeout]", msgl);
+
+ /*
+ * In practice, it's unlikely that we'll see PCIe PLL, bus parity,
+ * or memory parity error failures, because most likely we
+ * won't be able to talk to the core of the chip. Nonetheless, we
+ * might see them, if they are in parts of the PCIe core that aren't
+ * essential.
+ */
+ if (hwerrs & INFINIPATH_HWE_PCIE1PLLFAILED)
+ strlcat(msg, "[PCIePLL1]", msgl);
+ if (hwerrs & INFINIPATH_HWE_PCIE0PLLFAILED)
+ strlcat(msg, "[PCIePLL0]", msgl);
+ if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYXTLH)
+ strlcat(msg, "[PCIe XTLH core parity]", msgl);
+ if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYXADM)
+ strlcat(msg, "[PCIe ADM TX core parity]", msgl);
+ if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYRADM)
+ strlcat(msg, "[PCIe ADM RX core parity]", msgl);
+
+ if (hwerrs & INFINIPATH_HWE_RXDSYNCMEMPARITYERR)
+ strlcat(msg, "[Rx Dsync]", msgl);
+ if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED)
+ strlcat(msg, "[SerDes PLL]", msgl);
+
+ ipath_dev_err(dd, "%s hardware error\n", msg);
+ if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) {
+ /*
+ * for /sys status file; if no trailing } is copied, we'll
+ * know it was truncated.
+ */
+ snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
+ "{%s}", msg);
+ }
+}
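The parity-error reporting above repeatedly extracts a multi-bit field with (hwerrs >> SHIFT) & MASK and appends a formatted fragment to the message buffer. A standalone sketch of that pattern, with invented mask and shift values and strncat standing in for the kernel's strlcat():

#include <stdio.h>
#include <string.h>

#define ERRFIELD_MASK	0x3fULL		/* hypothetical 6-bit field */
#define ERRFIELD_SHIFT	8		/* hypothetical position */

int main(void)
{
	unsigned long long hwerrs = 0x1500ULL;	/* example status word */
	char msg[128] = "";
	char bitsmsg[64];

	if (hwerrs & (ERRFIELD_MASK << ERRFIELD_SHIFT)) {
		unsigned bits = (unsigned)
			((hwerrs >> ERRFIELD_SHIFT) & ERRFIELD_MASK);
		snprintf(bitsmsg, sizeof(bitsmsg),
			 "[Example Parity Errs %x] ", bits);
		/* strlcat() is a kernel helper; strncat is the libc stand-in */
		strncat(msg, bitsmsg, sizeof(msg) - strlen(msg) - 1);
	}
	printf("%s\n", msg);	/* prints "[Example Parity Errs 15] " */
	return 0;
}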
+
+/**
+ * ipath_pe_boardname - fill in the board name
+ * @dd: the infinipath device
+ * @name: the output buffer
+ * @namelen: the size of the output buffer
+ *
+ * info is based on the board revision register
+ */
+static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
+ size_t namelen)
+{
+ char *n = NULL;
+ u8 boardrev = dd->ipath_boardrev;
+ int ret;
+
+ switch (boardrev) {
+ case 0:
+ n = "InfiniPath_Emulation";
+ break;
+ case 1:
+ n = "InfiniPath_PE-800-Bringup";
+ break;
+ case 2:
+ n = "InfiniPath_PE-880";
+ break;
+ case 3:
+ n = "InfiniPath_PE-850";
+ break;
+ case 4:
+ n = "InfiniPath_PE-860";
+ break;
+ default:
+ ipath_dev_err(dd,
+ "Don't yet know about board with ID %u\n",
+ boardrev);
+ snprintf(name, namelen, "Unknown_InfiniPath_PE-8xx_%u",
+ boardrev);
+ break;
+ }
+ if (n)
+ snprintf(name, namelen, "%s", n);
+
+ if (dd->ipath_majrev != 4 || dd->ipath_minrev != 1) {
+ ipath_dev_err(dd, "Unsupported PE-800 revision %u.%u!\n",
+ dd->ipath_majrev, dd->ipath_minrev);
+ ret = 1;
+ } else
+ ret = 0;
+
+ return ret;
+}
+
+/**
+ * ipath_pe_init_hwerrors - enable hardware errors
+ * @dd: the infinipath device
+ *
+ * now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
+{
+ ipath_err_t val;
+ u64 extsval;
+
+ extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
+
+ if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
+ ipath_dev_err(dd, "MemBIST did not complete!\n");
+
+ val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
+
+ if (!dd->ipath_boardrev) /* no PLL for Emulator */
+ val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
+
+ /* workaround bug 9460 in internal interface bus parity checking */
+ val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
+
+ dd->ipath_hwerrmask = val;
+}
+
+/**
+ * ipath_pe_bringup_serdes - bring up the serdes
+ * @dd: the infinipath device
+ */
+int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
+{
+ u64 val, tmp, config1;
+ int ret = 0, change = 0;
+
+ ipath_dbg("Trying to bringup serdes\n");
+
+ if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
+ INFINIPATH_HWE_SERDESPLLFAILED) {
+ ipath_dbg("At start, serdes PLL failed bit set "
+ "in hwerrstatus, clearing and continuing\n");
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+ INFINIPATH_HWE_SERDESPLLFAILED);
+ }
+
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
+ config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
+
+ ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, "
+ "xgxsconfig %llx\n", (unsigned long long) val,
+ (unsigned long long) config1, (unsigned long long)
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
+
+ /*
+ * Force reset on, also set rxdetect enable. Must do before reading
+ * serdesstatus at least for simulation, or some of the bits in
+ * serdes status will come back as undefined and cause simulation
+ * failures
+ */
+ val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN
+ | INFINIPATH_SERDC0_L1PWR_DN;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
+ /* be sure chip saw it */
+ tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ udelay(5); /* need pll reset set at least for a bit */
+ /*
+ * after PLL is reset, set the per-lane Resets and TxIdle and
+ * clear the PLL reset and rxdetect (to get falling edge).
+ * Leave L1PWR bits set (permanently)
+ */
+ val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL
+ | INFINIPATH_SERDC0_L1PWR_DN);
+ val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE;
+ ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets "
+ "and txidle (%llx)\n", (unsigned long long) val);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
+ /* be sure chip saw it */
+ tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ /* need PLL reset clear for at least 11 usec before lane
+ * resets cleared; give it a few more to be sure */
+ udelay(15);
+ val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE);
+
+ ipath_cdbg(VERBOSE, "Clearing lane resets and txidle "
+ "(writing %llx)\n", (unsigned long long) val);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
+ /* be sure chip saw it */
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
+ if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
+ INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
+ val &=
+ ~(INFINIPATH_XGXS_MDIOADDR_MASK <<
+ INFINIPATH_XGXS_MDIOADDR_SHIFT);
+ /* MDIO address 3 */
+ val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
+ change = 1;
+ }
+ if (val & INFINIPATH_XGXS_RESET) {
+ val &= ~INFINIPATH_XGXS_RESET;
+ change = 1;
+ }
+ if (change)
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
+
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
+
+ /* clear current and de-emphasis bits */
+ config1 &= ~0x0ffffffff00ULL;
+ /* set current to 20ma */
+ config1 |= 0x00000000000ULL;
+ /* set de-emphasis to -5.68dB */
+ config1 |= 0x0cccc000000ULL;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
+
+ ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx "
+ "config1=%llx, sstatus=%llx xgxs=%llx\n",
+ (unsigned long long) val, (unsigned long long) config1,
+ (unsigned long long)
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
+ (unsigned long long)
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
+
+ if (!ipath_waitfor_mdio_cmdready(dd)) {
+ ipath_write_kreg(
+ dd, dd->ipath_kregs->kr_mdio,
+ ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
+ IPATH_MDIO_CTRL_XGXS_REG_8, 0));
+ if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
+ IPATH_MDIO_DATAVALID, &val))
+ ipath_dbg("Never got MDIO data for XGXS "
+ "status read\n");
+ else
+ ipath_cdbg(VERBOSE, "MDIO Read reg8, "
+ "'bank' 31 %x\n", (u32) val);
+ } else
+ ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
+
+ return ret;
+}
+
+/**
+ * ipath_pe_quiet_serdes - set serdes to txidle
+ * @dd: the infinipath device
+ * Called when driver is being unloaded
+ */
+void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
+{
+ u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
+
+ val |= INFINIPATH_SERDC0_TXIDLE;
+ ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
+ (unsigned long long) val);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
+}
+
+/* this is not yet needed on the PE800, so just return 0. */
+static int ipath_pe_intconfig(struct ipath_devdata *dd)
+{
+ return 0;
+}
+
+/**
+ * ipath_setup_pe_setextled - set the state of the two external LEDs
+ * @dd: the infinipath device
+ * @lst: the L state
+ * @ltst: the LT state
+ *
+ * These LEDs indicate the physical and logical state of IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state),
+ *
+ * Note: We try to match the Mellanox HCA LED behavior as best
+ * we can. Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate. That's
+ * visible overhead, so not something we will do.
+ *
+ */
+static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
+ u64 ltst)
+{
+ u64 extctl;
+
+ /* the diags use the LED to indicate diag info, so we leave
+ * the external LED alone when the diags are running */
+ if (ipath_diag_inuse)
+ return;
+
+ extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
+ INFINIPATH_EXTC_LED2PRIPORT_ON);
+
+ if (ltst & INFINIPATH_IBCS_LT_STATE_LINKUP)
+ extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
+ if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
+ extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
+ dd->ipath_extctrl = extctl;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
+}
+
+/**
+ * ipath_setup_pe_cleanup - clean up any chip-specific stuff
+ * @dd: the infinipath device
+ *
+ * This is called during driver unload.
+ * We do the pci_disable_msi here, not in generic code, because it
+ * isn't used for the HT-400. If we do end up needing pci_enable_msi
+ * at some point in the future for HT-400, we'll move the call back
+ * into the main init_one code.
+ */
+static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
+{
+ dd->ipath_msi_lo = 0; /* just in case unload fails */
+ pci_disable_msi(dd->pcidev);
+}
+
+/**
+ * ipath_setup_pe_config - setup PCIe config related stuff
+ * @dd: the infinipath device
+ * @pdev: the PCI device
+ *
+ * The pci_enable_msi() call will fail on systems with MSI quirks
+ * such as those with AMD8131, even if the device of interest is not
+ * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least;
+ * fixed late in 2.6.16).
+ * All that can be done is to edit the kernel source to remove the quirk
+ * check until that is fixed.
+ * We do not need to call enable_msi() for our HyperTransport chip (HT-400),
+ * even though it uses MSI, and we want to avoid the quirk warning, so
+ * we call enable_msi() only for the PE-800. If we do end up needing
+ * pci_enable_msi() at some point in the future for the HT-400, we'll move
+ * the call back into the main init_one code.
+ * We save the msi lo and hi values, so we can restore them after
+ * chip reset (the kernel PCI infrastructure doesn't yet handle that
+ * correctly).
+ */
+static int ipath_setup_pe_config(struct ipath_devdata *dd,
+ struct pci_dev *pdev)
+{
+ int pos, ret;
+
+ dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
+ ret = pci_enable_msi(dd->pcidev);
+ if (ret)
+ ipath_dev_err(dd, "pci_enable_msi failed: %d, "
+ "interrupts may not work\n", ret);
+ /* continue even if it fails, we may still be OK... */
+
+ if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
+ u16 control;
+ pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
+ &dd->ipath_msi_lo);
+ pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
+ &dd->ipath_msi_hi);
+ pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
+ &control);
+ /* now save the data (vector) info */
+ pci_read_config_word(dd->pcidev,
+ pos + ((control & PCI_MSI_FLAGS_64BIT)
+ ? 12 : 8),
+ &dd->ipath_msi_data);
+ ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset "
+ "0x%x, control=0x%x\n", dd->ipath_msi_data,
+ pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
+ control);
+ /* we save the cachelinesize also, although it doesn't
+ * really matter */
+ pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
+ &dd->ipath_pci_cacheline);
+ } else
+ ipath_dev_err(dd, "Can't find MSI capability, "
+ "can't save MSI settings for reset\n");
+ if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP))) {
+ u16 linkstat;
+ pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
+ &linkstat);
+ linkstat >>= 4;
+ linkstat &= 0x1f;
+ if (linkstat != 8)
+ ipath_dev_err(dd, "PCIe width %u, "
+ "performance reduced\n", linkstat);
+ } else
+ ipath_dev_err(dd, "Can't find PCI Express "
+ "capability!\n");
+ return 0;
+}
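The 8-versus-12 offset above follows the PCI MSI capability layout: the message data word sits 8 bytes into the capability when only a 32-bit message address is present, and 12 bytes in when the 64-bit flag is set, because the upper address dword occupies bytes 8-11. A small sketch of just that choice (the flag value mirrors PCI_MSI_FLAGS_64BIT):

#include <stdio.h>

#define MSI_FLAGS_64BIT 0x0080	/* 64-bit address bit in the MSI message control word */

/* Byte offset of the MSI message data word within the capability. */
static unsigned msi_data_offset(unsigned short control)
{
	return (control & MSI_FLAGS_64BIT) ? 12 : 8;
}

int main(void)
{
	printf("32-bit MSI: data at +%u\n", msi_data_offset(0x0000));	/* 8 */
	printf("64-bit MSI: data at +%u\n", msi_data_offset(0x0080));	/* 12 */
	return 0;
}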
+
+static void ipath_init_pe_variables(void)
+{
+ /*
+ * bits for selecting i2c direction and values,
+ * used for I2C serial flash
+ */
+ ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
+ ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
+ ipath_gpio_sda = IPATH_GPIO_SDA;
+ ipath_gpio_scl = IPATH_GPIO_SCL;
+
+ /* variables for sanity checking interrupt and errors */
+ infinipath_hwe_bitsextant =
+ (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
+ INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
+ (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
+ INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
+ INFINIPATH_HWE_PCIE1PLLFAILED |
+ INFINIPATH_HWE_PCIE0PLLFAILED |
+ INFINIPATH_HWE_PCIEPOISONEDTLP |
+ INFINIPATH_HWE_PCIECPLTIMEOUT |
+ INFINIPATH_HWE_PCIEBUSPARITYXTLH |
+ INFINIPATH_HWE_PCIEBUSPARITYXADM |
+ INFINIPATH_HWE_PCIEBUSPARITYRADM |
+ INFINIPATH_HWE_MEMBISTFAILED |
+ INFINIPATH_HWE_COREPLL_FBSLIP |
+ INFINIPATH_HWE_COREPLL_RFSLIP |
+ INFINIPATH_HWE_SERDESPLLFAILED |
+ INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
+ INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
+ infinipath_i_bitsextant =
+ (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
+ (INFINIPATH_I_RCVAVAIL_MASK <<
+ INFINIPATH_I_RCVAVAIL_SHIFT) |
+ INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
+ INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
+ infinipath_e_bitsextant =
+ INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
+ INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
+ INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
+ INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
+ INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
+ INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
+ INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+ INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
+ INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
+ INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
+ INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
+ INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
+ INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
+ INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
+ INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
+ INFINIPATH_E_HARDWARE;
+
+ infinipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
+ infinipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
+}
+
+/* Set up the MSI stuff again after a reset. I'd like to just call
+ * pci_enable_msi() and request_irq() again, but when I do that,
+ * the MSI enable bit doesn't get set in the command word, and
+ * we switch to a different interrupt vector, which is confusing,
+ * so I instead just do it all inline. Perhaps we can somehow tie this
+ * into the PCIe hotplug support at some point.
+ * Note, because I'm doing it all here, I don't call pci_disable_msi()
+ * or free_irq() at the start of ipath_setup_pe_reset().
+ */
+static int ipath_reinit_msi(struct ipath_devdata *dd)
+{
+ int pos;
+ u16 control;
+ int ret;
+
+ if (!dd->ipath_msi_lo) {
+ dev_info(&dd->pcidev->dev, "Can't restore MSI config, "
+ "initial setup failed?\n");
+ ret = 0;
+ goto bail;
+ }
+
+ if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
+ ipath_dev_err(dd, "Can't find MSI capability, "
+ "can't restore MSI settings\n");
+ ret = 0;
+ goto bail;
+ }
+ ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
+ dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
+ pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
+ dd->ipath_msi_lo);
+ ipath_cdbg(VERBOSE, "Writing msi_hi 0x%x to config offset 0x%x\n",
+ dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
+ pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
+ dd->ipath_msi_hi);
+ pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
+ if (!(control & PCI_MSI_FLAGS_ENABLE)) {
+ ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
+ "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
+ control, control | PCI_MSI_FLAGS_ENABLE);
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
+ control);
+ }
+ /* now rewrite the data (vector) info */
+ pci_write_config_word(dd->pcidev, pos +
+ ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
+ dd->ipath_msi_data);
+ /* we restore the cachelinesize also, although it doesn't really
+ * matter */
+ pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
+ dd->ipath_pci_cacheline);
+ /* and now set the pci master bit again */
+ pci_set_master(dd->pcidev);
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+/* This routine sleeps, so it can only be called from user context, not
+ * from interrupt context. If we need interrupt context, we can split
+ * it into two routines.
+ */
+static int ipath_setup_pe_reset(struct ipath_devdata *dd)
+{
+ u64 val;
+ int i;
+ int ret;
+
+ /* Use ERROR so it shows up in logs, etc. */
+ ipath_dev_err(dd, "Resetting PE-800 unit %u\n",
+ dd->ipath_unit);
+ val = dd->ipath_control | INFINIPATH_C_RESET;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
+ mb();
+
+ for (i = 1; i <= 5; i++) {
+ int r;
+ /* allow MBIST, etc. to complete; longer on each retry.
+ * We sometimes get machine checks from bus timeout if no
+ * response, so for now, make it *really* long.
+ */
+ msleep(1000 + (1 + i) * 2000);
+ if ((r =
+ pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
+ dd->ipath_pcibar0)))
+ ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n",
+ r);
+ if ((r =
+ pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
+ dd->ipath_pcibar1)))
+ ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
+ r);
+ /* now re-enable memory access */
+ if ((r = pci_enable_device(dd->pcidev)))
+ ipath_dev_err(dd, "pci_enable_device failed after "
+ "reset: %d\n", r);
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
+ if (val == dd->ipath_revision) {
+ ipath_cdbg(VERBOSE, "Got matching revision "
+ "register %llx on try %d\n",
+ (unsigned long long) val, i);
+ ret = ipath_reinit_msi(dd);
+ goto bail;
+ }
+ /* Probably getting -1 back */
+ ipath_dbg("Didn't get expected revision register, "
+ "got %llx, try %d\n", (unsigned long long) val,
+ i + 1);
+ }
+ ret = 0; /* failed */
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_pe_put_tid - write a TID in chip
+ * @dd: the infinipath device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: 0 for eager, 1 for expected
+ * @pa: physical address of the in-memory buffer; ipath_tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for special locking etc.
+ * It's used for both the full cleanup on exit, as well as the normal
+ * setup and teardown.
+ */
+static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+ unsigned long flags = 0; /* keep gcc quiet */
+
+ if (pa != dd->ipath_tidinvalid) {
+ if (pa & ((1U << 11) - 1)) {
+ dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
+ "not 4KB aligned!\n", pa);
+ return;
+ }
+ pa >>= 11;
+ /* paranoia check */
+ if (pa & (7<<29))
+ ipath_dev_err(dd,
+ "BUG: Physical page address 0x%lx "
+ "has bits set in 31-29\n", pa);
+
+ if (type == 0)
+ pa |= dd->ipath_tidtemplate;
+ else /* for now, always full 4KB page */
+ pa |= 2 << 29;
+ }
+
+ /* Work around chip bug 9437 by writing each TID twice
+ * and holding a spinlock around the writes, so they don't
+ * intermix with other TID (eager or expected) writes.
+ * Unfortunately, this call can be done from interrupt level
+ * for the port 0 eager TIDs, so we have to use irqsave
+ */
+ spin_lock_irqsave(&dd->ipath_tid_lock, flags);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
+ if (dd->ipath_kregbase)
+ writel(pa, tidp32);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
+ mmiowb();
+ spin_unlock_irqrestore(&dd->ipath_tid_lock, flags);
+}
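For illustration, the TID word assembled in ipath_pe_put_tid() packs the physical address in 2 KB units (hence the shift by 11 and the alignment check) together with the buffer-size code in bits 30:29. A worked example with a made-up address:

#include <stdio.h>

int main(void)
{
	unsigned long pa = 0x12345800UL;	/* example 2 KB-aligned phys addr */
	unsigned long tid;

	if (pa & ((1UL << 11) - 1)) {
		printf("not 2 KB aligned\n");
		return 1;
	}
	tid = pa >> 11;			/* address in 2 KB units */
	tid |= 2UL << 29;		/* "full 4 KB page" code, as above */

	printf("pa 0x%lx -> tid word 0x%lx\n", pa, tid);
	return 0;
}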
+
+/**
+ * ipath_pe_clear_tid - clear all TID entries for a port, expected and eager
+ * @dd: the infinipath device
+ * @port: the port
+ *
+ * clear all TID entries for a port, expected and eager.
+ * Used from ipath_close(). On PE800, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits
+ */
+static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
+{
+ u64 __iomem *tidbase;
+ unsigned long tidinv;
+ int i;
+
+ if (!dd->ipath_kregbase)
+ return;
+
+ ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
+
+ tidinv = dd->ipath_tidinvalid;
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->ipath_kregbase) +
+ dd->ipath_rcvtidbase +
+ port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
+
+ for (i = 0; i < dd->ipath_rcvtidcnt; i++)
+ ipath_pe_put_tid(dd, &tidbase[i], 0, tidinv);
+
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->ipath_kregbase) +
+ dd->ipath_rcvegrbase +
+ port * dd->ipath_rcvegrcnt * sizeof(*tidbase));
+
+ for (i = 0; i < dd->ipath_rcvegrcnt; i++)
+ ipath_pe_put_tid(dd, &tidbase[i], 1, tidinv);
+}
+
+/**
+ * ipath_pe_tidtemplate - setup constants for TID updates
+ * @dd: the infinipath device
+ *
+ * We setup stuff that we use a lot, to avoid calculating each time
+ */
+static void ipath_pe_tidtemplate(struct ipath_devdata *dd)
+{
+ u32 egrsize = dd->ipath_rcvegrbufsize;
+
+ /* For now, we always allocate 4KB buffers (at init) so we can
+ * receive max size packets. We may want a module parameter to
+ * specify 2KB or 4KB and/or make it per port instead of per device
+ * for those who want to reduce memory footprint. Note that the
+ * ipath_rcvhdrentsize size must be large enough to hold the largest
+ * IB header (currently 96 bytes) that we expect to handle (plus of
+ * course the 2 dwords of RHF).
+ */
+ if (egrsize == 2048)
+ dd->ipath_tidtemplate = 1U << 29;
+ else if (egrsize == 4096)
+ dd->ipath_tidtemplate = 2U << 29;
+ else {
+ egrsize = 4096;
+ dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
+ "%u, using %u\n", dd->ipath_rcvegrbufsize,
+ egrsize);
+ dd->ipath_tidtemplate = 2U << 29;
+ }
+ dd->ipath_tidinvalid = 0;
+}
+
+static int ipath_pe_early_init(struct ipath_devdata *dd)
+{
+ dd->ipath_flags |= IPATH_4BYTE_TID;
+
+ /*
+ * For openib, we need to be able to handle an IB header of 96 bytes
+ * or 24 dwords. HT-400 has arbitrary sized receive buffers, so we
+ * made them the same size as the PIO buffers. The PE-800 does not
+ * handle arbitrary size buffers, so we need the header large enough
+ * to handle largest IB header, but still have room for a 2KB MTU
+ * standard IB packet.
+ */
+ dd->ipath_rcvhdrentsize = 24;
+ dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
+
+ /* For HT-400, we allocate a somewhat overly large eager buffer,
+ * such that we can guarantee that we can receive the largest packet
+ * that we can send out. To truly support a 4KB MTU, we need to
+ * bump this to a larger value. We'll do this when I get around to
+ * testing 4KB sends on the PE-800, which I have not yet done.
+ */
+ dd->ipath_rcvegrbufsize = 2048;
+ /*
+ * the min() check here is currently a nop, but it may not always
+ * be, depending on just how we do ipath_rcvegrbufsize
+ */
+ dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
+ dd->ipath_rcvegrbufsize +
+ (dd->ipath_rcvhdrentsize << 2));
+ dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
+
+ /*
+ * For PE-800, we can request a receive interrupt for 1 or
+ * more packets from current offset. For now, we set this
+ * up for a single packet, to match the HT-400 behavior.
+ */
+ dd->ipath_rhdrhead_intr_off = 1ULL<<32;
+
+ return 0;
+}
+
+int __attribute__((weak)) ipath_unordered_wc(void)
+{
+ return 0;
+}
+
+/**
+ * ipath_pe_get_base_info - set chip-specific flags for user code
+ * @pd: the infinipath port data
+ * @kbase: ipath_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
+{
+ struct ipath_base_info *kinfo = kbase;
+
+ if (ipath_unordered_wc()) {
+ kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER;
+ ipath_cdbg(PROC, "Intel processor, forcing WC order\n");
+ }
+ else
+ ipath_cdbg(PROC, "Not Intel processor, WC ordered\n");
+
+ kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE;
+
+ return 0;
+}
+
+/**
+ * ipath_init_pe800_funcs - set up the chip-specific function pointers
+ * @dd: the infinipath device
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+void ipath_init_pe800_funcs(struct ipath_devdata *dd)
+{
+ dd->ipath_f_intrsetup = ipath_pe_intconfig;
+ dd->ipath_f_bus = ipath_setup_pe_config;
+ dd->ipath_f_reset = ipath_setup_pe_reset;
+ dd->ipath_f_get_boardname = ipath_pe_boardname;
+ dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors;
+ dd->ipath_f_early_init = ipath_pe_early_init;
+ dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors;
+ dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
+ dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
+ dd->ipath_f_clear_tids = ipath_pe_clear_tids;
+ dd->ipath_f_put_tid = ipath_pe_put_tid;
+ dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
+ dd->ipath_f_setextled = ipath_setup_pe_setextled;
+ dd->ipath_f_get_base_info = ipath_pe_get_base_info;
+
+ /* initialize chip-specific variables */
+ dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
+
+ /*
+ * setup the register offsets, since they are different for each
+ * chip
+ */
+ dd->ipath_kregs = &ipath_pe_kregs;
+ dd->ipath_cregs = &ipath_pe_cregs;
+
+ ipath_init_pe_variables();
+}
+
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
new file mode 100644
index 0000000000000..6058d70d7577b
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -0,0 +1,913 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+
+#include "ipath_verbs.h"
+#include "ips_common.h"
+
+#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
+#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+#define mk_qpn(qpt, map, off) (((map) - (qpt)->map) * BITS_PER_PAGE + \
+ (off))
+#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
+ BITS_PER_PAGE, off)
+
+#define TRANS_INVALID 0
+#define TRANS_ANY2RST 1
+#define TRANS_RST2INIT 2
+#define TRANS_INIT2INIT 3
+#define TRANS_INIT2RTR 4
+#define TRANS_RTR2RTS 5
+#define TRANS_RTS2RTS 6
+#define TRANS_SQERR2RTS 7
+#define TRANS_ANY2ERR 8
+#define TRANS_RTS2SQD 9 /* XXX Wait for expected ACKs & signal event */
+#define TRANS_SQD2SQD 10 /* error if not drained & parameter change */
+#define TRANS_SQD2RTS 11 /* error if not drained */
+
+/*
+ * Convert the AETH credit code into the number of credits.
+ */
+static u32 credit_table[31] = {
+ 0, /* 0 */
+ 1, /* 1 */
+ 2, /* 2 */
+ 3, /* 3 */
+ 4, /* 4 */
+ 6, /* 5 */
+ 8, /* 6 */
+ 12, /* 7 */
+ 16, /* 8 */
+ 24, /* 9 */
+ 32, /* A */
+ 48, /* B */
+ 64, /* C */
+ 96, /* D */
+ 128, /* E */
+ 192, /* F */
+ 256, /* 10 */
+ 384, /* 11 */
+ 512, /* 12 */
+ 768, /* 13 */
+ 1024, /* 14 */
+ 1536, /* 15 */
+ 2048, /* 16 */
+ 3072, /* 17 */
+ 4096, /* 18 */
+ 6144, /* 19 */
+ 8192, /* 1A */
+ 12288, /* 1B */
+ 16384, /* 1C */
+ 24576, /* 1D */
+ 32768 /* 1E */
+};
+
+static u32 alloc_qpn(struct ipath_qp_table *qpt)
+{
+ u32 i, offset, max_scan, qpn;
+ struct qpn_map *map;
+ u32 ret;
+
+ qpn = qpt->last + 1;
+ if (qpn >= QPN_MAX)
+ qpn = 2;
+ offset = qpn & BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / BITS_PER_PAGE];
+ max_scan = qpt->nmaps - !offset;
+ for (i = 0;;) {
+ if (unlikely(!map->page)) {
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
+ unsigned long flags;
+
+ /*
+ * Free the page if someone raced with us
+ * installing it:
+ */
+ spin_lock_irqsave(&qpt->lock, flags);
+ if (map->page)
+ free_page(page);
+ else
+ map->page = (void *)page;
+ spin_unlock_irqrestore(&qpt->lock, flags);
+ if (unlikely(!map->page))
+ break;
+ }
+ if (likely(atomic_read(&map->n_free))) {
+ do {
+ if (!test_and_set_bit(offset, map->page)) {
+ atomic_dec(&map->n_free);
+ qpt->last = qpn;
+ ret = qpn;
+ goto bail;
+ }
+ offset = find_next_offset(map, offset);
+ qpn = mk_qpn(qpt, map, offset);
+ /*
+ * This test differs from alloc_pidmap().
+ * If find_next_offset() does find a zero
+ * bit, we don't need to check for QPN
+ * wrapping around past our starting QPN.
+ * We just need to be sure we don't loop
+ * forever.
+ */
+ } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+ }
+ /*
+ * In order to keep the number of pages allocated to a
+ * minimum, we scan all the existing pages before increasing
+ * the size of the bitmap table.
+ */
+ if (++i > max_scan) {
+ if (qpt->nmaps == QPNMAP_ENTRIES)
+ break;
+ map = &qpt->map[qpt->nmaps++];
+ offset = 0;
+ } else if (map < &qpt->map[qpt->nmaps]) {
+ ++map;
+ offset = 0;
+ } else {
+ map = &qpt->map[0];
+ offset = 2;
+ }
+ qpn = mk_qpn(qpt, map, offset);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
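+
+/*
+ * QPN <-> bitmap arithmetic used above, worked through with a 4KB
+ * PAGE_SIZE (an assumption; BITS_PER_PAGE is then 32768): QPN 40000
+ * lives in qpt->map[40000 / 32768] = map[1] at bit offset
+ * 40000 & (32768 - 1) = 7232, and mk_qpn(qpt, &qpt->map[1], 7232)
+ * recovers 1 * 32768 + 7232 = 40000.
+ */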
+
+static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
+{
+ struct qpn_map *map;
+
+ map = qpt->map + qpn / BITS_PER_PAGE;
+ if (map->page)
+ clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+ atomic_inc(&map->n_free);
+}
+
+/**
+ * ipath_alloc_qpn - allocate a QP number
+ * @qpt: the QP table
+ * @qp: the QP
+ * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
+ *
+ * Allocate the next available QPN and put the QP into the hash table.
+ * The hash table holds a reference to the QP.
+ */
+int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
+ enum ib_qp_type type)
+{
+ unsigned long flags;
+ u32 qpn;
+ int ret;
+
+ if (type == IB_QPT_SMI)
+ qpn = 0;
+ else if (type == IB_QPT_GSI)
+ qpn = 1;
+ else {
+ /* Allocate the next available QPN */
+ qpn = alloc_qpn(qpt);
+ if (qpn == 0) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ }
+ qp->ibqp.qp_num = qpn;
+
+ /* Add the QP to the hash table. */
+ spin_lock_irqsave(&qpt->lock, flags);
+
+ qpn %= qpt->max;
+ qp->next = qpt->table[qpn];
+ qpt->table[qpn] = qp;
+ atomic_inc(&qp->refcount);
+
+ spin_unlock_irqrestore(&qpt->lock, flags);
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_free_qp - remove a QP from the QP table
+ * @qpt: the QP table
+ * @qp: the QP to remove
+ *
+ * Remove the QP from the table so it can't be found asynchronously by
+ * the receive interrupt routine.
+ */
+void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
+{
+ struct ipath_qp *q, **qpp;
+ unsigned long flags;
+ int fnd = 0;
+
+ spin_lock_irqsave(&qpt->lock, flags);
+
+ /* Remove QP from the hash table. */
+ qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
+ for (; (q = *qpp) != NULL; qpp = &q->next) {
+ if (q == qp) {
+ *qpp = qp->next;
+ qp->next = NULL;
+ atomic_dec(&qp->refcount);
+ fnd = 1;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&qpt->lock, flags);
+
+ if (!fnd)
+ return;
+
+ /* If QPN is not reserved, mark QPN free in the bitmap. */
+ if (qp->ibqp.qp_num > 1)
+ free_qpn(qpt, qp->ibqp.qp_num);
+
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+}
+
+/**
+ * ipath_free_all_qps - remove all QPs from the table
+ * @qpt: the QP table to empty
+ */
+void ipath_free_all_qps(struct ipath_qp_table *qpt)
+{
+ unsigned long flags;
+ struct ipath_qp *qp, *nqp;
+ u32 n;
+
+ for (n = 0; n < qpt->max; n++) {
+ spin_lock_irqsave(&qpt->lock, flags);
+ qp = qpt->table[n];
+ qpt->table[n] = NULL;
+ spin_unlock_irqrestore(&qpt->lock, flags);
+
+ while (qp) {
+ nqp = qp->next;
+ if (qp->ibqp.qp_num > 1)
+ free_qpn(qpt, qp->ibqp.qp_num);
+ if (!atomic_dec_and_test(&qp->refcount) ||
+ !ipath_destroy_qp(&qp->ibqp))
+ _VERBS_INFO("QP memory leak!\n");
+ qp = nqp;
+ }
+ }
+
+ for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
+ if (qpt->map[n].page)
+ free_page((unsigned long)qpt->map[n].page);
+ }
+}
+
+/**
+ * ipath_lookup_qpn - return the QP with the given QPN
+ * @qpt: the QP table
+ * @qpn: the QP number to look up
+ *
+ * The caller is responsible for decrementing the QP reference count
+ * when done.
+ */
+struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
+{
+ unsigned long flags;
+ struct ipath_qp *qp;
+
+ spin_lock_irqsave(&qpt->lock, flags);
+
+ for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
+ if (qp->ibqp.qp_num == qpn) {
+ atomic_inc(&qp->refcount);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&qpt->lock, flags);
+ return qp;
+}
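+
+/*
+ * Sketch of the expected caller pattern (the wake_up() pairing is an
+ * assumption inferred from the wait_event() in ipath_free_qp() above,
+ * not something this file defines):
+ *
+ * qp = ipath_lookup_qpn(&dev->qp_table, qpn);
+ * if (qp) {
+ * ... process the received packet ...
+ * if (atomic_dec_and_test(&qp->refcount))
+ * wake_up(&qp->wait);
+ * }
+ */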
+
+/**
+ * ipath_reset_qp - initialize the QP state to the reset state
+ * @qp: the QP to reset
+ */
+static void ipath_reset_qp(struct ipath_qp *qp)
+{
+ qp->remote_qpn = 0;
+ qp->qkey = 0;
+ qp->qp_access_flags = 0;
+ qp->s_hdrwords = 0;
+ qp->s_psn = 0;
+ qp->r_psn = 0;
+ atomic_set(&qp->msn, 0);
+ if (qp->ibqp.qp_type == IB_QPT_RC) {
+ qp->s_state = IB_OPCODE_RC_SEND_LAST;
+ qp->r_state = IB_OPCODE_RC_SEND_LAST;
+ } else {
+ qp->s_state = IB_OPCODE_UC_SEND_LAST;
+ qp->r_state = IB_OPCODE_UC_SEND_LAST;
+ }
+ qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+ qp->s_nak_state = 0;
+ qp->s_rnr_timeout = 0;
+ qp->s_head = 0;
+ qp->s_tail = 0;
+ qp->s_cur = 0;
+ qp->s_last = 0;
+ qp->s_ssn = 1;
+ qp->s_lsn = 0;
+ qp->r_rq.head = 0;
+ qp->r_rq.tail = 0;
+ qp->r_reuse_sge = 0;
+}
+
+/**
+ * ipath_modify_qp - modify the attributes of a queue pair
+ * @ibqp: the queue pair whose attributes we're modifying
+ * @attr: the new attributes
+ * @attr_mask: the mask of attributes to modify
+ *
+ * Returns 0 on success, otherwise returns an errno.
+ */
+int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ struct ipath_qp *qp = to_iqp(ibqp);
+ enum ib_qp_state cur_state, new_state;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+ spin_lock(&qp->s_lock);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ?
+ attr->cur_qp_state : qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask))
+ goto inval;
+
+ switch (new_state) {
+ case IB_QPS_RESET:
+ ipath_reset_qp(qp);
+ break;
+
+ case IB_QPS_ERR:
+ ipath_error_qp(qp);
+ break;
+
+ default:
+ break;
+
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ struct ipath_ibdev *dev = to_idev(ibqp->device);
+
+ if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
+ goto inval;
+ qp->s_pkey_index = attr->pkey_index;
+ }
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ qp->remote_qpn = attr->dest_qp_num;
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ qp->s_next_psn = attr->sq_psn;
+ qp->s_last_psn = qp->s_next_psn - 1;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ qp->r_psn = attr->rq_psn;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->qp_access_flags = attr->qp_access_flags;
+
+ if (attr_mask & IB_QP_AV) {
+ if (attr->ah_attr.dlid == 0 ||
+ attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
+ goto inval;
+ qp->remote_ah_attr = attr->ah_attr;
+ }
+
+ if (attr_mask & IB_QP_PATH_MTU)
+ qp->path_mtu = attr->path_mtu;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ qp->s_rnr_retry = attr->rnr_retry;
+ if (qp->s_rnr_retry > 7)
+ qp->s_rnr_retry = 7;
+ qp->s_rnr_retry_cnt = qp->s_rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ if (attr->min_rnr_timer > 31)
+ goto inval;
+ qp->s_min_rnr_timer = attr->min_rnr_timer;
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ qp->s_pkey_index = attr->pkey_index;
+
+ qp->state = new_state;
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+
+ /*
+ * If QP1 changed to the RTS state, try to move the link to INIT
+ * even if it was ACTIVE so the SM will reinitialize the SMA's
+ * state.
+ */
+ if (qp->ibqp.qp_num == 1 && new_state == IB_QPS_RTS) {
+ struct ipath_ibdev *dev = to_idev(ibqp->device);
+
+ ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
+ }
+ ret = 0;
+ goto bail;
+
+inval:
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ ret = -EINVAL;
+
+bail:
+ return ret;
+}
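+
+/*
+ * For reference, a consumer typically drives this through three
+ * ib_modify_qp() calls to bring an RC QP up (the attribute masks shown
+ * are the usual ones from the verbs API, not something this function
+ * mandates by itself):
+ *
+ * RESET->INIT: IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
+ * IB_QP_ACCESS_FLAGS
+ * INIT->RTR: IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
+ * IB_QP_RQ_PSN | IB_QP_MIN_RNR_TIMER | IB_QP_MAX_DEST_RD_ATOMIC
+ * RTR->RTS: IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+ * IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC
+ *
+ * Each call lands here with the corresponding attr_mask bits set and is
+ * validated by ib_modify_qp_is_ok() before the fields are copied in.
+ */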
+
+int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct ipath_qp *qp = to_iqp(ibqp);
+
+ attr->qp_state = qp->state;
+ attr->cur_qp_state = attr->qp_state;
+ attr->path_mtu = qp->path_mtu;
+ attr->path_mig_state = 0;
+ attr->qkey = qp->qkey;
+ attr->rq_psn = qp->r_psn;
+ attr->sq_psn = qp->s_next_psn;
+ attr->dest_qp_num = qp->remote_qpn;
+ attr->qp_access_flags = qp->qp_access_flags;
+ attr->cap.max_send_wr = qp->s_size - 1;
+ attr->cap.max_recv_wr = qp->r_rq.size - 1;
+ attr->cap.max_send_sge = qp->s_max_sge;
+ attr->cap.max_recv_sge = qp->r_rq.max_sge;
+ attr->cap.max_inline_data = 0;
+ attr->ah_attr = qp->remote_ah_attr;
+ memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
+ attr->pkey_index = qp->s_pkey_index;
+ attr->alt_pkey_index = 0;
+ attr->en_sqd_async_notify = 0;
+ attr->sq_draining = 0;
+ attr->max_rd_atomic = 1;
+ attr->max_dest_rd_atomic = 1;
+ attr->min_rnr_timer = qp->s_min_rnr_timer;
+ attr->port_num = 1;
+ attr->timeout = 0;
+ attr->retry_cnt = qp->s_retry_cnt;
+ attr->rnr_retry = qp->s_rnr_retry;
+ attr->alt_port_num = 0;
+ attr->alt_timeout = 0;
+
+ init_attr->event_handler = qp->ibqp.event_handler;
+ init_attr->qp_context = qp->ibqp.qp_context;
+ init_attr->send_cq = qp->ibqp.send_cq;
+ init_attr->recv_cq = qp->ibqp.recv_cq;
+ init_attr->srq = qp->ibqp.srq;
+ init_attr->cap = attr->cap;
+ init_attr->sq_sig_type =
+ (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
+ ? IB_SIGNAL_REQ_WR : 0;
+ init_attr->qp_type = qp->ibqp.qp_type;
+ init_attr->port_num = 1;
+ return 0;
+}
+
+/**
+ * ipath_compute_aeth - compute the AETH (syndrome + MSN)
+ * @qp: the queue pair to compute the AETH for
+ *
+ * Returns the AETH.
+ *
+ * The QP s_lock should be held.
+ */
+__be32 ipath_compute_aeth(struct ipath_qp *qp)
+{
+ u32 aeth = atomic_read(&qp->msn) & IPS_MSN_MASK;
+
+ if (qp->s_nak_state) {
+ aeth |= qp->s_nak_state << IPS_AETH_CREDIT_SHIFT;
+ } else if (qp->ibqp.srq) {
+ /*
+ * Shared receive queues don't generate credits.
+ * Set the credit field to the invalid value.
+ */
+ aeth |= IPS_AETH_CREDIT_INVAL << IPS_AETH_CREDIT_SHIFT;
+ } else {
+ u32 min, max, x;
+ u32 credits;
+
+ /*
+ * Compute the number of credits available (RWQEs).
+ * XXX Not holding the r_rq.lock here so there is a small
+ * chance that the pair of reads is not atomic.
+ */
+ credits = qp->r_rq.head - qp->r_rq.tail;
+ if ((int)credits < 0)
+ credits += qp->r_rq.size;
+ /*
+ * Binary search the credit table to find the code to
+ * use.
+ */
+ min = 0;
+ max = 31;
+ for (;;) {
+ x = (min + max) / 2;
+ if (credit_table[x] == credits)
+ break;
+ if (credit_table[x] > credits)
+ max = x;
+ else if (min == x)
+ break;
+ else
+ min = x;
+ }
+ aeth |= x << IPS_AETH_CREDIT_SHIFT;
+ }
+ return cpu_to_be32(aeth);
+}
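+
+/*
+ * Worked example of the binary search above: with 100 free RWQEs the
+ * loop ends at x = 0xd, since credit_table[0xd] = 96 is the largest
+ * entry not exceeding the actual count; 0xd is then placed in the
+ * credit field of the AETH (at IPS_AETH_CREDIT_SHIFT) alongside the
+ * MSN in the low bits (IPS_MSN_MASK).
+ */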
+
+/**
+ * ipath_create_qp - create a queue pair for a device
+ * @ibpd: the protection domain whose device we create the queue pair for
+ * @init_attr: the attributes of the queue pair
+ * @udata: unused by InfiniPath
+ *
+ * Returns the queue pair on success, otherwise returns an errno.
+ *
+ * Called by the ib_create_qp() core verbs function.
+ */
+struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct ipath_qp *qp;
+ int err;
+ struct ipath_swqe *swq = NULL;
+ struct ipath_ibdev *dev;
+ size_t sz;
+ struct ib_qp *ret;
+
+ if (init_attr->cap.max_send_sge > 255 ||
+ init_attr->cap.max_recv_sge > 255) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_UC:
+ case IB_QPT_RC:
+ sz = sizeof(struct ipath_sge) *
+ init_attr->cap.max_send_sge +
+ sizeof(struct ipath_swqe);
+ swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
+ if (swq == NULL) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+ /* FALLTHROUGH */
+ case IB_QPT_UD:
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ qp = kmalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+ qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+ sz = sizeof(struct ipath_sge) *
+ init_attr->cap.max_recv_sge +
+ sizeof(struct ipath_rwqe);
+ qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
+ if (!qp->r_rq.wq) {
+ kfree(qp);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ /*
+ * ib_create_qp() will initialize qp->ibqp
+ * except for qp->ibqp.qp_num.
+ */
+ spin_lock_init(&qp->s_lock);
+ spin_lock_init(&qp->r_rq.lock);
+ atomic_set(&qp->refcount, 0);
+ init_waitqueue_head(&qp->wait);
+ tasklet_init(&qp->s_task,
+ init_attr->qp_type == IB_QPT_RC ?
+ ipath_do_rc_send : ipath_do_uc_send,
+ (unsigned long)qp);
+ qp->piowait.next = LIST_POISON1;
+ qp->piowait.prev = LIST_POISON2;
+ qp->timerwait.next = LIST_POISON1;
+ qp->timerwait.prev = LIST_POISON2;
+ qp->state = IB_QPS_RESET;
+ qp->s_wq = swq;
+ qp->s_size = init_attr->cap.max_send_wr + 1;
+ qp->s_max_sge = init_attr->cap.max_send_sge;
+ qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
+ 1 << IPATH_S_SIGNAL_REQ_WR : 0;
+ dev = to_idev(ibpd->device);
+ err = ipath_alloc_qpn(&dev->qp_table, qp,
+ init_attr->qp_type);
+ if (err) {
+ vfree(swq);
+ vfree(qp->r_rq.wq);
+ kfree(qp);
+ ret = ERR_PTR(err);
+ goto bail;
+ }
+ ipath_reset_qp(qp);
+
+ /* Tell the core driver that the kernel SMA is present. */
+ if (qp->ibqp.qp_type == IB_QPT_SMI)
+ ipath_layer_set_verbs_flags(dev->dd,
+ IPATH_VERBS_KERNEL_SMA);
+ break;
+
+ default:
+ /* Don't support raw QPs */
+ ret = ERR_PTR(-ENOSYS);
+ goto bail;
+ }
+
+ init_attr->cap.max_inline_data = 0;
+
+ ret = &qp->ibqp;
+
+bail:
+ return ret;
+}
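+
+/*
+ * Queue sizing above: each send WQE is stored as a struct ipath_swqe
+ * followed by max_send_sge struct ipath_sge entries, so the send queue
+ * is a vmalloc of (max_send_wr + 1) * sz bytes (and similarly for the
+ * receive queue).  The "+ 1" leaves one slot unused so that head == tail
+ * always means "empty", which is why ipath_query_qp() reports the
+ * capacity back as s_size - 1.
+ */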
+
+/**
+ * ipath_destroy_qp - destroy a queue pair
+ * @ibqp: the queue pair to destroy
+ *
+ * Returns 0 on success.
+ *
+ * Note that this can be called while the QP is actively sending or
+ * receiving!
+ */
+int ipath_destroy_qp(struct ib_qp *ibqp)
+{
+ struct ipath_qp *qp = to_iqp(ibqp);
+ struct ipath_ibdev *dev = to_idev(ibqp->device);
+ unsigned long flags;
+
+ /* Tell the core driver that the kernel SMA is gone. */
+ if (qp->ibqp.qp_type == IB_QPT_SMI)
+ ipath_layer_set_verbs_flags(dev->dd, 0);
+
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+ spin_lock(&qp->s_lock);
+ qp->state = IB_QPS_ERR;
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+
+ /* Stop the sending tasklet. */
+ tasklet_kill(&qp->s_task);
+
+ /* Make sure the QP isn't on the timeout list. */
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ if (qp->timerwait.next != LIST_POISON1)
+ list_del(&qp->timerwait);
+ if (qp->piowait.next != LIST_POISON1)
+ list_del(&qp->piowait);
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+ /*
+ * Make sure that the QP is not in the QPN table so receive
+ * interrupts will discard packets for this QP. XXX Also remove QP
+ * from multicast table.
+ */
+ if (atomic_read(&qp->refcount) != 0)
+ ipath_free_qp(&dev->qp_table, qp);
+
+ vfree(qp->s_wq);
+ vfree(qp->r_rq.wq);
+ kfree(qp);
+ return 0;
+}
+
+/**
+ * ipath_init_qp_table - initialize the QP table for a device
+ * @idev: the device whose QP table we're initializing
+ * @size: the size of the QP table
+ *
+ * Returns 0 on success, otherwise returns an errno.
+ */
+int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
+{
+ int i;
+ int ret;
+
+ idev->qp_table.last = 1; /* QPN 0 and 1 are special. */
+ idev->qp_table.max = size;
+ idev->qp_table.nmaps = 1;
+ idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
+ GFP_KERNEL);
+ if (idev->qp_table.table == NULL) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
+ atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
+ idev->qp_table.map[i].page = NULL;
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_sqerror_qp - put a QP's send queue into an error state
+ * @qp: QP whose send queue will be put into an error state
+ * @wc: the WC responsible for putting the QP in this state
+ *
+ * Flushes the send work queue.
+ * The QP s_lock should be held.
+ */
+void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
+{
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+
+ _VERBS_INFO("Send queue error on QP%d/%d: err: %d\n",
+ qp->ibqp.qp_num, qp->remote_qpn, wc->status);
+
+ spin_lock(&dev->pending_lock);
+ /* XXX What if it's already removed by the timeout code? */
+ if (qp->timerwait.next != LIST_POISON1)
+ list_del(&qp->timerwait);
+ if (qp->piowait.next != LIST_POISON1)
+ list_del(&qp->piowait);
+ spin_unlock(&dev->pending_lock);
+
+ ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+
+ wc->status = IB_WC_WR_FLUSH_ERR;
+
+ while (qp->s_last != qp->s_head) {
+ wc->wr_id = wqe->wr.wr_id;
+ wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+ ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ }
+ qp->s_cur = qp->s_tail = qp->s_head;
+ qp->state = IB_QPS_SQE;
+}
+
+/**
+ * ipath_error_qp - put a QP into an error state
+ * @qp: the QP to put into an error state
+ *
+ * Flushes both send and receive work queues.
+ * QP r_rq.lock and s_lock should be held.
+ */
+void ipath_error_qp(struct ipath_qp *qp)
+{
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ib_wc wc;
+
+ _VERBS_INFO("QP%d/%d in error state\n",
+ qp->ibqp.qp_num, qp->remote_qpn);
+
+ spin_lock(&dev->pending_lock);
+ /* XXX What if it's already removed by the timeout code? */
+ if (qp->timerwait.next != LIST_POISON1)
+ list_del(&qp->timerwait);
+ if (qp->piowait.next != LIST_POISON1)
+ list_del(&qp->piowait);
+ spin_unlock(&dev->pending_lock);
+
+ wc.status = IB_WC_WR_FLUSH_ERR;
+ wc.vendor_err = 0;
+ wc.byte_len = 0;
+ wc.imm_data = 0;
+ wc.qp_num = qp->ibqp.qp_num;
+ wc.src_qp = 0;
+ wc.wc_flags = 0;
+ wc.pkey_index = 0;
+ wc.slid = 0;
+ wc.sl = 0;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+
+ while (qp->s_last != qp->s_head) {
+ struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+
+ wc.wr_id = wqe->wr.wr_id;
+ wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
+ }
+ qp->s_cur = qp->s_tail = qp->s_head;
+ qp->s_hdrwords = 0;
+ qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+
+ wc.opcode = IB_WC_RECV;
+ while (qp->r_rq.tail != qp->r_rq.head) {
+ wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
+ if (++qp->r_rq.tail >= qp->r_rq.size)
+ qp->r_rq.tail = 0;
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+}
+
+/**
+ * ipath_get_credit - update the send credit state from an incoming AETH
+ * @qp: the QP whose send credit to update
+ * @aeth: the Acknowledge Extended Transport Header
+ *
+ * The QP s_lock should be held.
+ */
+void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
+{
+ u32 credit = (aeth >> IPS_AETH_CREDIT_SHIFT) & IPS_AETH_CREDIT_MASK;
+
+ /*
+ * If the credit is invalid, we can send
+ * as many packets as we like. Otherwise, we have to
+ * honor the credit field.
+ */
+ if (credit == IPS_AETH_CREDIT_INVAL) {
+ qp->s_lsn = (u32) -1;
+ } else if (qp->s_lsn != (u32) -1) {
+ /* Compute new LSN (i.e., MSN + credit) */
+ credit = (aeth + credit_table[credit]) & IPS_MSN_MASK;
+ if (ipath_cmp24(credit, qp->s_lsn) > 0)
+ qp->s_lsn = credit;
+ }
+
+ /* Restart sending if it was blocked due to lack of credits. */
+ if (qp->s_cur != qp->s_head &&
+ (qp->s_lsn == (u32) -1 ||
+ ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
+ qp->s_lsn + 1) <= 0))
+ tasklet_hi_schedule(&qp->s_task);
+}
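+
+/*
+ * Worked example (illustrative numbers): an AETH carrying MSN 100 and
+ * credit code 0x8 grants credit_table[0x8] = 16 RWQEs, so s_lsn becomes
+ * (100 + 16) masked by IPS_MSN_MASK = 116; the RC request builder then
+ * lets WQEs through while ipath_cmp24(wqe->ssn, qp->s_lsn + 1) <= 0.
+ */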
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
new file mode 100644
index 0000000000000..a4055ca006142
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -0,0 +1,1857 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ipath_verbs.h"
+#include "ips_common.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_RC_##x
+
+/**
+ * ipath_init_restart - initialize the qp->s_sge after a restart
+ * @qp: the QP whose SGE we're restarting
+ * @wqe: the work queue to initialize the QP's SGE from
+ *
+ * The QP s_lock should be held.
+ */
+static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
+{
+ struct ipath_ibdev *dev;
+ u32 len;
+
+ len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) *
+ ib_mtu_enum_to_int(qp->path_mtu);
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ ipath_skip_sge(&qp->s_sge, len);
+ qp->s_len = wqe->length - len;
+ dev = to_idev(qp->ibqp.device);
+ spin_lock(&dev->pending_lock);
+ if (qp->timerwait.next == LIST_POISON1)
+ list_add_tail(&qp->timerwait,
+ &dev->pending[dev->pending_index]);
+ spin_unlock(&dev->pending_lock);
+}
+
+/**
+ * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
+ * @qp: a pointer to the QP
+ * @ohdr: a pointer to the IB header being constructed
+ * @pmtu: the path MTU
+ *
+ * Return bth0 if constructed; otherwise, return 0.
+ * Note the QP s_lock must be held.
+ */
+static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
+ struct ipath_other_headers *ohdr,
+ u32 pmtu)
+{
+ struct ipath_sge_state *ss;
+ u32 hwords;
+ u32 len;
+ u32 bth0;
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+
+ /*
+ * Send a response. Note that we are in the responder's
+ * side of the QP context.
+ */
+ switch (qp->s_ack_state) {
+ case OP(RDMA_READ_REQUEST):
+ ss = &qp->s_rdma_sge;
+ len = qp->s_rdma_len;
+ if (len > pmtu) {
+ len = pmtu;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
+ }
+ else
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
+ qp->s_rdma_len -= len;
+ bth0 = qp->s_ack_state << 24;
+ ohdr->u.aeth = ipath_compute_aeth(qp);
+ hwords++;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ ss = &qp->s_rdma_sge;
+ len = qp->s_rdma_len;
+ if (len > pmtu)
+ len = pmtu;
+ else {
+ ohdr->u.aeth = ipath_compute_aeth(qp);
+ hwords++;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ }
+ qp->s_rdma_len -= len;
+ bth0 = qp->s_ack_state << 24;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ case OP(RDMA_READ_RESPONSE_ONLY):
+ /*
+ * We have to prevent new requests from changing
+ * the r_sge state while an ipath_verbs_send()
+ * is in progress.
+ * Changing r_state allows the receiver
+ * to continue processing new packets.
+ * We do it here now instead of above so
+ * that we are sure the packet was sent before
+ * changing the state.
+ */
+ qp->r_state = OP(RDMA_READ_RESPONSE_LAST);
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ return 0;
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD):
+ ss = NULL;
+ len = 0;
+ qp->r_state = OP(SEND_LAST);
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ bth0 = IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
+ ohdr->u.at.aeth = ipath_compute_aeth(qp);
+ ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
+ hwords += sizeof(ohdr->u.at) / 4;
+ break;
+
+ default:
+ /* Send a regular ACK. */
+ ss = NULL;
+ len = 0;
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ bth0 = qp->s_ack_state << 24;
+ ohdr->u.aeth = ipath_compute_aeth(qp);
+ hwords++;
+ }
+ qp->s_hdrwords = hwords;
+ qp->s_cur_sge = ss;
+ qp->s_cur_size = len;
+
+ return bth0;
+}
+
+/**
+ * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
+ * @qp: a pointer to the QP
+ * @ohdr: a pointer to the IB header being constructed
+ * @pmtu: the path MTU
+ * @bth0p: pointer to the BTH opcode word
+ * @bth2p: pointer to the BTH PSN word
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ * Note the QP s_lock must be held.
+ */
+static inline int ipath_make_rc_req(struct ipath_qp *qp,
+ struct ipath_other_headers *ohdr,
+ u32 pmtu, u32 *bth0p, u32 *bth2p)
+{
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ipath_sge_state *ss;
+ struct ipath_swqe *wqe;
+ u32 hwords;
+ u32 len;
+ u32 bth0;
+ u32 bth2;
+ char newreq;
+
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
+ qp->s_rnr_timeout)
+ goto done;
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ bth0 = 0;
+
+ /* Send a request. */
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ switch (qp->s_state) {
+ default:
+ /*
+ * Resend an old request or start a new one.
+ *
+ * We keep track of the current SWQE so that
+ * we don't reset the "furthest progress" state
+ * if we need to back up.
+ */
+ newreq = 0;
+ if (qp->s_cur == qp->s_tail) {
+ /* Check if send work queue is empty. */
+ if (qp->s_tail == qp->s_head)
+ goto done;
+ qp->s_psn = wqe->psn = qp->s_next_psn;
+ newreq = 1;
+ }
+ /*
+ * Note that we have to be careful not to modify the
+ * original work request since we may need to resend
+ * it.
+ */
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_len = len = wqe->length;
+ ss = &qp->s_sge;
+ bth2 = 0;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ /* If no credit, return. */
+ if (qp->s_lsn != (u32) -1 &&
+ ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
+ goto done;
+ wqe->lpsn = wqe->psn;
+ if (len > pmtu) {
+ wqe->lpsn += (len - 1) / pmtu;
+ qp->s_state = OP(SEND_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_ONLY);
+ else {
+ qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= 1 << 23;
+ bth2 = 1 << 31; /* Request ACK. */
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ if (newreq)
+ qp->s_lsn++;
+ /* FALLTHROUGH */
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ /* If no credit, return. */
+ if (qp->s_lsn != (u32) -1 &&
+ ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
+ goto done;
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ hwords += sizeof(struct ib_reth) / 4;
+ wqe->lpsn = wqe->psn;
+ if (len > pmtu) {
+ wqe->lpsn += (len - 1) / pmtu;
+ qp->s_state = OP(RDMA_WRITE_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_ONLY);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes
+ * after RETH */
+ ohdr->u.rc.imm_data = wqe->wr.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= 1 << 23;
+ }
+ bth2 = 1 << 31; /* Request ACK. */
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_READ:
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / 4;
+ if (newreq) {
+ qp->s_lsn++;
+ /*
+ * Adjust s_next_psn to count the
+ * expected number of responses.
+ */
+ if (len > pmtu)
+ qp->s_next_psn += (len - 1) / pmtu;
+ wqe->lpsn = qp->s_next_psn++;
+ }
+ ss = NULL;
+ len = 0;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP)
+ qp->s_state = OP(COMPARE_SWAP);
+ else
+ qp->s_state = OP(FETCH_ADD);
+ ohdr->u.atomic_eth.vaddr = cpu_to_be64(
+ wqe->wr.wr.atomic.remote_addr);
+ ohdr->u.atomic_eth.rkey = cpu_to_be32(
+ wqe->wr.wr.atomic.rkey);
+ ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+ wqe->wr.wr.atomic.swap);
+ ohdr->u.atomic_eth.compare_data = cpu_to_be64(
+ wqe->wr.wr.atomic.compare_add);
+ hwords += sizeof(struct ib_atomic_eth) / 4;
+ if (newreq) {
+ qp->s_lsn++;
+ wqe->lpsn = wqe->psn;
+ }
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ ss = NULL;
+ len = 0;
+ break;
+
+ default:
+ goto done;
+ }
+ if (newreq) {
+ qp->s_tail++;
+ if (qp->s_tail >= qp->s_size)
+ qp->s_tail = 0;
+ }
+ bth2 |= qp->s_psn++ & IPS_PSN_MASK;
+ if ((int)(qp->s_psn - qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ spin_lock(&dev->pending_lock);
+ if (qp->timerwait.next == LIST_POISON1)
+ list_add_tail(&qp->timerwait,
+ &dev->pending[dev->pending_index]);
+ spin_unlock(&dev->pending_lock);
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ /*
+ * This case can only happen if a send is restarted. See
+ * ipath_restart_rc().
+ */
+ ipath_init_restart(qp, wqe);
+ /* FALLTHROUGH */
+ case OP(SEND_FIRST):
+ qp->s_state = OP(SEND_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ bth2 = qp->s_psn++ & IPS_PSN_MASK;
+ if ((int)(qp->s_psn - qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ /*
+ * Request an ACK every 1/2 MB to avoid retransmit
+ * timeouts.
+ */
+ if (((wqe->length - len) % (512 * 1024)) == 0)
+ bth2 |= 1 << 31;
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_LAST);
+ else {
+ qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= 1 << 23;
+ bth2 |= 1 << 31; /* Request ACK. */
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /*
+ * This case can only happen if a RDMA write is restarted.
+ * See ipath_restart_rc().
+ */
+ ipath_init_restart(qp, wqe);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_FIRST):
+ qp->s_state = OP(RDMA_WRITE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ bth2 = qp->s_psn++ & IPS_PSN_MASK;
+ if ((int)(qp->s_psn - qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ /*
+ * Request an ACK every 1/2 MB to avoid retransmit
+ * timeouts.
+ */
+ if (((wqe->length - len) % (512 * 1024)) == 0)
+ bth2 |= 1 << 31;
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_LAST);
+ else {
+ qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= 1 << 23;
+ }
+ bth2 |= 1 << 31; /* Request ACK. */
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /*
+ * This case can only happen if a RDMA read is restarted.
+ * See ipath_restart_rc().
+ */
+ ipath_init_restart(qp, wqe);
+ len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) * pmtu;
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / 4;
+ bth2 = qp->s_psn++ & IPS_PSN_MASK;
+ if ((int)(qp->s_psn - qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = NULL;
+ len = 0;
+ qp->s_cur++;
+ if (qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_REQUEST):
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD):
+ /*
+ * We shouldn't start anything new until this request is
+ * finished. The ACK will handle rescheduling us. XXX The
+ * number of outstanding ones is negotiated at connection
+ * setup time (see pg. 258,289)? XXX Also, if we support
+ * multiple outstanding requests, we need to check the WQE
+ * IB_SEND_FENCE flag and not send a new request if a RDMA
+ * read or atomic is pending.
+ */
+ goto done;
+ }
+ qp->s_len -= len;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_sge = ss;
+ qp->s_cur_size = len;
+ *bth0p = bth0 | (qp->s_state << 24);
+ *bth2p = bth2;
+ return 1;
+
+done:
+ return 0;
+}
+
+static inline void ipath_make_rc_grh(struct ipath_qp *qp,
+ struct ib_global_route *grh,
+ u32 nwords)
+{
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+
+ /* GRH header size in 32-bit words. */
+ qp->s_hdrwords += 10;
+ qp->s_hdr.u.l.grh.version_tclass_flow =
+ cpu_to_be32((6 << 28) |
+ (grh->traffic_class << 20) |
+ grh->flow_label);
+ qp->s_hdr.u.l.grh.paylen =
+ cpu_to_be16(((qp->s_hdrwords - 12) + nwords +
+ SIZE_OF_CRC) << 2);
+ /* next_hdr is defined by C8-7 in ch. 8.4.1 */
+ qp->s_hdr.u.l.grh.next_hdr = 0x1B;
+ qp->s_hdr.u.l.grh.hop_limit = grh->hop_limit;
+ /* The SGID is 32-bit aligned. */
+ qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = dev->gid_prefix;
+ qp->s_hdr.u.l.grh.sgid.global.interface_id =
+ ipath_layer_get_guid(dev->dd);
+ qp->s_hdr.u.l.grh.dgid = grh->dgid;
+}
+
+/**
+ * ipath_do_rc_send - perform a send on an RC QP
+ * @data: contains a pointer to the QP
+ *
+ * Process entries in the send work queue until credit or queue is
+ * exhausted. Only allow one CPU to send a packet per QP (tasklet).
+ * Otherwise, after we drop the QP s_lock, two threads could send
+ * packets out of order.
+ */
+void ipath_do_rc_send(unsigned long data)
+{
+ struct ipath_qp *qp = (struct ipath_qp *)data;
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ unsigned long flags;
+ u16 lrh0;
+ u32 nwords;
+ u32 extra_bytes;
+ u32 bth0;
+ u32 bth2;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ struct ipath_other_headers *ohdr;
+
+ if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
+ goto bail;
+
+ if (unlikely(qp->remote_ah_attr.dlid ==
+ ipath_layer_get_lid(dev->dd))) {
+ struct ib_wc wc;
+
+ /*
+ * Pass in an uninitialized ib_wc to be consistent with
+ * other places where ipath_ruc_loopback() is called.
+ */
+ ipath_ruc_loopback(qp, &wc);
+ goto clear;
+ }
+
+ ohdr = &qp->s_hdr.u.oth;
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ ohdr = &qp->s_hdr.u.l.oth;
+
+again:
+ /* Check for a constructed packet to be sent. */
+ if (qp->s_hdrwords != 0) {
+ /*
+ * If no PIO bufs are available, return. An interrupt will
+ * call ipath_ib_piobufavail() when one is available.
+ */
+ _VERBS_INFO("h %u %p\n", qp->s_hdrwords, &qp->s_hdr);
+ _VERBS_INFO("d %u %p %u %p %u %u %u %u\n", qp->s_cur_size,
+ qp->s_cur_sge->sg_list,
+ qp->s_cur_sge->num_sge,
+ qp->s_cur_sge->sge.vaddr,
+ qp->s_cur_sge->sge.sge_length,
+ qp->s_cur_sge->sge.length,
+ qp->s_cur_sge->sge.m,
+ qp->s_cur_sge->sge.n);
+ if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
+ (u32 *) &qp->s_hdr, qp->s_cur_size,
+ qp->s_cur_sge)) {
+ ipath_no_bufs_available(qp, dev);
+ goto bail;
+ }
+ dev->n_unicast_xmit++;
+ /* Record that we sent the packet and s_hdr is empty. */
+ qp->s_hdrwords = 0;
+ }
+
+ /*
+ * The lock is needed to synchronize between setting
+ * qp->s_ack_state, resend timer, and post_send().
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Sending responses takes priority over sending requests. */
+ if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
+ (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
+ bth2 = qp->s_ack_psn++ & IPS_PSN_MASK;
+ else if (!ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2))
+ goto done;
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Construct the header. */
+ extra_bytes = (4 - qp->s_cur_size) & 3;
+ nwords = (qp->s_cur_size + extra_bytes) >> 2;
+ lrh0 = IPS_LRH_BTH;
+ if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, nwords);
+ lrh0 = IPS_LRH_GRH;
+ }
+ lrh0 |= qp->remote_ah_attr.sl << 4;
+ qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+ qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
+ SIZE_OF_CRC);
+ qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
+ bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+ bth0 |= extra_bytes << 20;
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(bth2);
+
+ /* Check for more work to do. */
+ goto again;
+
+done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+clear:
+ clear_bit(IPATH_S_BUSY, &qp->s_flags);
+bail:
+ return;
+}
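+
+/*
+ * Padding arithmetic above, with illustrative numbers: a 27-byte payload
+ * gives extra_bytes = (4 - 27) & 3 = 1 and nwords = (27 + 1) >> 2 = 7;
+ * the pad count is carried in bits 21:20 of bth0 (extra_bytes << 20) and
+ * recovered by the receiver in ipath_rc_rcv_resp() below.
+ */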
+
+static void send_rc_ack(struct ipath_qp *qp)
+{
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ u16 lrh0;
+ u32 bth0;
+ struct ipath_other_headers *ohdr;
+
+ /* Construct the header. */
+ ohdr = &qp->s_hdr.u.oth;
+ lrh0 = IPS_LRH_BTH;
+ /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
+ qp->s_hdrwords = 6;
+ if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, 0);
+ ohdr = &qp->s_hdr.u.l.oth;
+ lrh0 = IPS_LRH_GRH;
+ }
+ bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+ ohdr->u.aeth = ipath_compute_aeth(qp);
+ if (qp->s_ack_state >= OP(COMPARE_SWAP)) {
+ bth0 |= IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
+ ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
+ qp->s_hdrwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
+ }
+ else
+ bth0 |= OP(ACKNOWLEDGE) << 24;
+ lrh0 |= qp->remote_ah_attr.sl << 4;
+ qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+ qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + SIZE_OF_CRC);
+ qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(qp->s_ack_psn & IPS_PSN_MASK);
+
+ /*
+ * If we can send the ACK, clear the ACK state.
+ */
+ if (ipath_verbs_send(dev->dd, qp->s_hdrwords, (u32 *) &qp->s_hdr,
+ 0, NULL) == 0) {
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ dev->n_rc_qacks++;
+ dev->n_unicast_xmit++;
+ }
+}
+
+/**
+ * ipath_restart_rc - back up requester to resend the last un-ACKed request
+ * @qp: the QP to restart
+ * @psn: packet sequence number for the request
+ * @wc: the work completion request
+ *
+ * The QP s_lock should be held.
+ */
+void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
+{
+ struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+ struct ipath_ibdev *dev;
+ u32 n;
+
+ /*
+ * If there are no requests pending, we are done.
+ */
+ if (ipath_cmp24(psn, qp->s_next_psn) >= 0 ||
+ qp->s_last == qp->s_tail)
+ goto done;
+
+ if (qp->s_retry == 0) {
+ wc->wr_id = wqe->wr.wr_id;
+ wc->status = IB_WC_RETRY_EXC_ERR;
+ wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+ wc->vendor_err = 0;
+ wc->byte_len = 0;
+ wc->qp_num = qp->ibqp.qp_num;
+ wc->src_qp = qp->remote_qpn;
+ wc->pkey_index = 0;
+ wc->slid = qp->remote_ah_attr.dlid;
+ wc->sl = qp->remote_ah_attr.sl;
+ wc->dlid_path_bits = 0;
+ wc->port_num = 0;
+ ipath_sqerror_qp(qp, wc);
+ goto bail;
+ }
+ qp->s_retry--;
+
+ /*
+ * Remove the QP from the timeout queue.
+ * Note: it may already have been removed by ipath_ib_timer().
+ */
+ dev = to_idev(qp->ibqp.device);
+ spin_lock(&dev->pending_lock);
+ if (qp->timerwait.next != LIST_POISON1)
+ list_del(&qp->timerwait);
+ spin_unlock(&dev->pending_lock);
+
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ dev->n_rc_resends++;
+ else
+ dev->n_rc_resends += (int)qp->s_psn - (int)psn;
+
+ /*
+ * If we are starting the request from the beginning, let the normal
+ * send code handle initialization.
+ */
+ qp->s_cur = qp->s_last;
+ if (ipath_cmp24(psn, wqe->psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = wqe->psn;
+ } else {
+ n = qp->s_cur;
+ for (;;) {
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail) {
+ if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
+ qp->s_cur = n;
+ wqe = get_swqe_ptr(qp, n);
+ }
+ break;
+ }
+ wqe = get_swqe_ptr(qp, n);
+ if (ipath_cmp24(psn, wqe->psn) < 0)
+ break;
+ qp->s_cur = n;
+ }
+ qp->s_psn = psn;
+
+ /*
+ * Reset the state to restart in the middle of a request.
+ * Don't change the s_sge, s_cur_sge, or s_cur_size.
+ * See ipath_do_rc_send().
+ */
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+ break;
+
+ case IB_WR_RDMA_READ:
+ qp->s_state =
+ OP(RDMA_READ_RESPONSE_MIDDLE);
+ break;
+
+ default:
+ /*
+ * This case shouldn't happen since there is
+ * only one PSN per request.
+ */
+ qp->s_state = OP(SEND_LAST);
+ }
+ }
+
+done:
+ tasklet_hi_schedule(&qp->s_task);
+
+bail:
+ return;
+}
+
+/**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from do_rc_ack() while processing an RNR NAK, to back
+ * the send state up to the given PSN.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct ipath_qp *qp, u32 psn)
+{
+ struct ipath_swqe *wqe;
+ u32 n;
+
+ n = qp->s_cur;
+ wqe = get_swqe_ptr(qp, n);
+ for (;;) {
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail) {
+ if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
+ qp->s_cur = n;
+ wqe = get_swqe_ptr(qp, n);
+ }
+ break;
+ }
+ wqe = get_swqe_ptr(qp, n);
+ if (ipath_cmp24(psn, wqe->psn) < 0)
+ break;
+ qp->s_cur = n;
+ }
+ qp->s_psn = psn;
+
+ /*
+ * Set the state to restart in the middle of a
+ * request. Don't change the s_sge, s_cur_sge, or
+ * s_cur_size. See ipath_do_rc_send().
+ */
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+ break;
+
+ case IB_WR_RDMA_READ:
+ qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ break;
+
+ default:
+ /*
+ * This case shouldn't happen since there is
+ * only one PSN per request.
+ */
+ qp->s_state = OP(SEND_LAST);
+ }
+}
+
+/**
+ * do_rc_ack - process an incoming RC ACK
+ * @qp: the QP the ACK came in on
+ * @psn: the packet sequence number of the ACK
+ * @opcode: the opcode of the request that resulted in the ACK
+ *
+ * This is called from ipath_rc_rcv() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ * Returns 1 if OK, 0 if current operation should be aborted (NAK).
+ */
+static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
+{
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ib_wc wc;
+ struct ipath_swqe *wqe;
+ int ret = 0;
+
+ /*
+ * Remove the QP from the timeout queue (or RNR timeout queue).
+ * If ipath_ib_timer() has already removed it,
+ * it's OK since we hold the QP s_lock and ipath_restart_rc()
+ * just won't find anything to restart if we ACK everything.
+ */
+ spin_lock(&dev->pending_lock);
+ if (qp->timerwait.next != LIST_POISON1)
+ list_del(&qp->timerwait);
+ spin_unlock(&dev->pending_lock);
+
+ /*
+ * Note that NAKs implicitly ACK outstanding SEND and RDMA write
+ * requests and implicitly NAK RDMA read and atomic requests issued
+ * before the NAK'ed request. The MSN won't include the NAK'ed
+ * request but will include the ACK'ed request(s).
+ */
+ wqe = get_swqe_ptr(qp, qp->s_last);
+
+ /* Nothing is pending to ACK/NAK. */
+ if (qp->s_last == qp->s_tail)
+ goto bail;
+
+ /*
+ * The MSN might be for a later WQE than the PSN indicates so
+ * only complete WQEs that the PSN finishes.
+ */
+ while (ipath_cmp24(psn, wqe->lpsn) >= 0) {
+ /* If we are ACKing a WQE, the MSN should be >= the SSN. */
+ if (ipath_cmp24(aeth, wqe->ssn) < 0)
+ break;
+ /*
+ * If this request is a RDMA read or atomic, and the ACK is
+ * for a later operation, this ACK NAKs the RDMA read or
+ * atomic. In other words, only a RDMA_READ_LAST or ONLY
+ * can ACK a RDMA read and likewise for atomic ops. Note
+ * that the NAK case can only happen if relaxed ordering is
+ * used and requests are sent after an RDMA read or atomic
+ * is sent but before the response is received.
+ */
+ if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
+ opcode != OP(RDMA_READ_RESPONSE_LAST)) ||
+ ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
+ (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
+ ipath_cmp24(wqe->psn, psn) != 0))) {
+ /*
+ * The last valid PSN seen is the previous
+ * request's.
+ */
+ qp->s_last_psn = wqe->psn - 1;
+ /* Retry this request. */
+ ipath_restart_rc(qp, wqe->psn, &wc);
+ /*
+ * No need to process the ACK/NAK since we are
+ * restarting an earlier request.
+ */
+ goto bail;
+ }
+ /* Post a send completion queue entry if requested. */
+ if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+ wc.vendor_err = 0;
+ wc.byte_len = wqe->length;
+ wc.qp_num = qp->ibqp.qp_num;
+ wc.src_qp = qp->remote_qpn;
+ wc.pkey_index = 0;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ }
+ qp->s_retry = qp->s_retry_cnt;
+ /*
+ * If we are completing a request which is in the process of
+ * being resent, we can stop resending it since we know the
+ * responder has already seen it.
+ */
+ if (qp->s_last == qp->s_cur) {
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = wqe->psn;
+ }
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ if (qp->s_last == qp->s_tail)
+ break;
+ }
+
+ switch (aeth >> 29) {
+ case 0: /* ACK */
+ dev->n_rc_acks++;
+ /* If this is a partial ACK, reset the retransmit timer. */
+ if (qp->s_last != qp->s_tail) {
+ spin_lock(&dev->pending_lock);
+ list_add_tail(&qp->timerwait,
+ &dev->pending[dev->pending_index]);
+ spin_unlock(&dev->pending_lock);
+ }
+ ipath_get_credit(qp, aeth);
+ qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ qp->s_retry = qp->s_retry_cnt;
+ qp->s_last_psn = psn;
+ ret = 1;
+ goto bail;
+
+ case 1: /* RNR NAK */
+ dev->n_rnr_naks++;
+ if (qp->s_rnr_retry == 0) {
+ if (qp->s_last == qp->s_tail)
+ goto bail;
+
+ wc.status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto class_b;
+ }
+ if (qp->s_rnr_retry_cnt < 7)
+ qp->s_rnr_retry--;
+ if (qp->s_last == qp->s_tail)
+ goto bail;
+
+ /* The last valid PSN seen is the previous request's. */
+ qp->s_last_psn = wqe->psn - 1;
+
+ dev->n_rc_resends += (int)qp->s_psn - (int)psn;
+
+ /*
+ * If we are starting the request from the beginning, let
+ * the normal send code handle initialization.
+ */
+ qp->s_cur = qp->s_last;
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ if (ipath_cmp24(psn, wqe->psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = wqe->psn;
+ } else
+ reset_psn(qp, psn);
+
+ qp->s_rnr_timeout =
+ ib_ipath_rnr_table[(aeth >> IPS_AETH_CREDIT_SHIFT) &
+ IPS_AETH_CREDIT_MASK];
+ ipath_insert_rnr_queue(qp);
+ goto bail;
+
+ case 3: /* NAK */
+ /* The last valid PSN seen is the previous request's. */
+ if (qp->s_last != qp->s_tail)
+ qp->s_last_psn = wqe->psn - 1;
+ switch ((aeth >> IPS_AETH_CREDIT_SHIFT) &
+ IPS_AETH_CREDIT_MASK) {
+ case 0: /* PSN sequence error */
+ dev->n_seq_naks++;
+ /*
+ * Back up to the responder's expected PSN. XXX
+ * Note that we might get a NAK in the middle of an
+ * RDMA READ response which terminates the RDMA
+ * READ.
+ */
+ if (qp->s_last == qp->s_tail)
+ break;
+
+ if (ipath_cmp24(psn, wqe->psn) < 0)
+ break;
+
+ /* Retry the request. */
+ ipath_restart_rc(qp, psn, &wc);
+ break;
+
+ case 1: /* Invalid Request */
+ wc.status = IB_WC_REM_INV_REQ_ERR;
+ dev->n_other_naks++;
+ goto class_b;
+
+ case 2: /* Remote Access Error */
+ wc.status = IB_WC_REM_ACCESS_ERR;
+ dev->n_other_naks++;
+ goto class_b;
+
+ case 3: /* Remote Operation Error */
+ wc.status = IB_WC_REM_OP_ERR;
+ dev->n_other_naks++;
+ class_b:
+ wc.wr_id = wqe->wr.wr_id;
+ wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+ wc.vendor_err = 0;
+ wc.byte_len = 0;
+ wc.qp_num = qp->ibqp.qp_num;
+ wc.src_qp = qp->remote_qpn;
+ wc.pkey_index = 0;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ ipath_sqerror_qp(qp, &wc);
+ break;
+
+ default:
+ /* Ignore other reserved NAK error codes */
+ goto reserved;
+ }
+ qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ goto bail;
+
+ default: /* 2: reserved */
+ reserved:
+ /* Ignore reserved NAK codes. */
+ goto bail;
+ }
+
+bail:
+ return ret;
+}
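+
+/*
+ * AETH decoding summary for the switch above: bits 31:29 select the
+ * response type (0 = ACK, 1 = RNR NAK, 3 = NAK, 2 = reserved), and the
+ * field at IPS_AETH_CREDIT_SHIFT carries, respectively, the credit
+ * code, the RNR timer index into ib_ipath_rnr_table, or the NAK reason
+ * (0 = PSN sequence error, 1 = invalid request, 2 = remote access error,
+ * 3 = remote operation error).
+ */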
+
+/**
+ * ipath_rc_rcv_resp - process an incoming RC response packet
+ * @dev: the device this packet came in on
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @hdrsize: the header length
+ * @pmtu: the path MTU
+ * @header_in_data: true if part of the header data is in the data buffer
+ *
+ * This is called from ipath_rc_rcv() to process an incoming RC response
+ * packet for the given QP.
+ * Called at interrupt level.
+ */
+static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
+ struct ipath_other_headers *ohdr,
+ void *data, u32 tlen,
+ struct ipath_qp *qp,
+ u32 opcode,
+ u32 psn, u32 hdrsize, u32 pmtu,
+ int header_in_data)
+{
+ unsigned long flags;
+ struct ib_wc wc;
+ int diff;
+ u32 pad;
+ u32 aeth;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Ignore invalid responses. */
+ if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
+ goto ack_done;
+
+ /* Ignore duplicate responses. */
+ diff = ipath_cmp24(psn, qp->s_last_psn);
+ if (unlikely(diff <= 0)) {
+ /* Update credits for "ghost" ACKs */
+ if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
+ if (!header_in_data)
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ else {
+ aeth = be32_to_cpu(((__be32 *) data)[0]);
+ data += sizeof(__be32);
+ }
+ if ((aeth >> 29) == 0)
+ ipath_get_credit(qp, aeth);
+ }
+ goto ack_done;
+ }
+
+ switch (opcode) {
+ case OP(ACKNOWLEDGE):
+ case OP(ATOMIC_ACKNOWLEDGE):
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ if (!header_in_data)
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ else {
+ aeth = be32_to_cpu(((__be32 *) data)[0]);
+ data += sizeof(__be32);
+ }
+ if (opcode == OP(ATOMIC_ACKNOWLEDGE))
+ *(u64 *) qp->s_sge.sge.vaddr = *(u64 *) data;
+ if (!do_rc_ack(qp, aeth, psn, opcode) ||
+ opcode != OP(RDMA_READ_RESPONSE_FIRST))
+ goto ack_done;
+ hdrsize += 4;
+ /*
+ * do_rc_ack() has already checked the PSN so skip
+ * the sequence check.
+ */
+ goto rdma_read;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /* no AETH, no ACK */
+ if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
+ dev->n_rdma_seq++;
+ ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+ goto ack_done;
+ }
+ rdma_read:
+ if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
+ goto ack_done;
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto ack_done;
+ if (unlikely(pmtu >= qp->s_len))
+ goto ack_done;
+ /* We got a response so update the timeout. */
+ if (unlikely(qp->s_last == qp->s_tail ||
+ get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
+ IB_WR_RDMA_READ))
+ goto ack_done;
+ spin_lock(&dev->pending_lock);
+ if (qp->s_rnr_timeout == 0 &&
+ qp->timerwait.next != LIST_POISON1)
+ list_move_tail(&qp->timerwait,
+ &dev->pending[dev->pending_index]);
+ spin_unlock(&dev->pending_lock);
+ /*
+ * Update the RDMA receive state but do the copy w/o holding the
+ * locks and blocking interrupts. XXX Yet another place that
+ * affects relaxed RDMA order since we don't want s_sge modified.
+ */
+ qp->s_len -= pmtu;
+ qp->s_last_psn = psn;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ ipath_copy_sge(&qp->s_sge, data, pmtu);
+ goto bail;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /* ACKs READ req. */
+ if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
+ dev->n_rdma_seq++;
+ ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+ goto ack_done;
+ }
+ /* FALLTHROUGH */
+ case OP(RDMA_READ_RESPONSE_ONLY):
+ if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
+ goto ack_done;
+ /*
+ * Get the number of bytes the message was padded by.
+ */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /*
+ * Check that the data size is >= 1 && <= pmtu.
+ * Remember to account for the AETH header (4) and
+ * ICRC (4).
+ */
+ if (unlikely(tlen <= (hdrsize + pad + 8))) {
+ /*
+ * XXX Need to generate an error CQ
+ * entry.
+ */
+ goto ack_done;
+ }
+ tlen -= hdrsize + pad + 8;
+ if (unlikely(tlen != qp->s_len)) {
+ /*
+ * XXX Need to generate an error CQ
+ * entry.
+ */
+ goto ack_done;
+ }
+ if (!header_in_data)
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ else {
+ aeth = be32_to_cpu(((__be32 *) data)[0]);
+ data += sizeof(__be32);
+ }
+ ipath_copy_sge(&qp->s_sge, data, tlen);
+ if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
+ /*
+ * Change the state so we continue
+ * processing new requests.
+ */
+ qp->s_state = OP(SEND_LAST);
+ }
+ goto ack_done;
+ }
+
+ack_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+bail:
+ return;
+}
+
+/**
+ * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
+ * @dev: the device this packet came in on
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @diff: the difference between the PSN and the expected PSN
+ * @header_in_data: true if part of the header data is in the data buffer
+ *
+ * This is called from ipath_rc_rcv() to process an unexpected
+ * incoming RC packet for the given QP.
+ * Called at interrupt level.
+ * Return 1 if no more processing is needed; otherwise return 0 to
+ * schedule a response to be sent and the s_lock unlocked.
+ */
+static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
+ struct ipath_other_headers *ohdr,
+ void *data,
+ struct ipath_qp *qp,
+ u32 opcode,
+ u32 psn,
+ int diff,
+ int header_in_data)
+{
+ struct ib_reth *reth;
+
+ if (diff > 0) {
+ /*
+ * Packet sequence error.
+ * A NAK will ACK earlier sends and RDMA writes.
+ * Don't queue the NAK if a RDMA read, atomic, or
+ * NAK is pending though.
+ */
+ spin_lock(&qp->s_lock);
+ if ((qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
+ qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) ||
+ qp->s_nak_state != 0) {
+ spin_unlock(&qp->s_lock);
+ goto done;
+ }
+ qp->s_ack_state = OP(SEND_ONLY);
+ qp->s_nak_state = IB_NAK_PSN_ERROR;
+ /* Use the expected PSN. */
+ qp->s_ack_psn = qp->r_psn;
+ goto resched;
+ }
+
+ /*
+ * Handle a duplicate request. Don't re-execute SEND, RDMA
+ * write or atomic op. Don't NAK errors, just silently drop
+ * the duplicate request. Note that r_sge, r_len, and
+ * r_rcv_len may be in use so don't modify them.
+ *
+ * We are supposed to ACK the earliest duplicate PSN but we
+ * can coalesce an outstanding duplicate ACK. We have to
+ * send the earliest so that RDMA reads can be restarted at
+ * the requester's expected PSN.
+ */
+ spin_lock(&qp->s_lock);
+ if (qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE &&
+ ipath_cmp24(psn, qp->s_ack_psn) >= 0) {
+ if (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST)
+ qp->s_ack_psn = psn;
+ spin_unlock(&qp->s_lock);
+ goto done;
+ }
+ switch (opcode) {
+ case OP(RDMA_READ_REQUEST):
+ /*
+ * We have to be careful to not change s_rdma_sge
+ * while ipath_do_rc_send() is using it and not
+ * holding the s_lock.
+ */
+ if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
+ qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
+ spin_unlock(&qp->s_lock);
+ dev->n_rdma_dup_busy++;
+ goto done;
+ }
+ /* RETH comes after BTH */
+ if (!header_in_data)
+ reth = &ohdr->u.rc.reth;
+ else {
+ reth = (struct ib_reth *)data;
+ data += sizeof(*reth);
+ }
+ qp->s_rdma_len = be32_to_cpu(reth->length);
+ if (qp->s_rdma_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /*
+ * Address range must be a subset of the original
+ * request and start on pmtu boundaries.
+ */
+ ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
+ qp->s_rdma_len, vaddr, rkey,
+ IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto done;
+ } else {
+ qp->s_rdma_sge.sg_list = NULL;
+ qp->s_rdma_sge.num_sge = 0;
+ qp->s_rdma_sge.sge.mr = NULL;
+ qp->s_rdma_sge.sge.vaddr = NULL;
+ qp->s_rdma_sge.sge.length = 0;
+ qp->s_rdma_sge.sge.sge_length = 0;
+ }
+ break;
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD):
+ /*
+ * Check for the PSN of the last atomic operation
+ * performed and resend the result if found.
+ */
+ if ((psn & IPS_PSN_MASK) != qp->r_atomic_psn) {
+ spin_unlock(&qp->s_lock);
+ goto done;
+ }
+ qp->s_ack_atomic = qp->r_atomic_data;
+ break;
+ }
+ qp->s_ack_state = opcode;
+ qp->s_nak_state = 0;
+ qp->s_ack_psn = psn;
+resched:
+ return 0;
+
+done:
+ return 1;
+}
+
+/**
+ * ipath_rc_rcv - process an incoming RC packet
+ * @dev: the device this packet came in on
+ * @hdr: the header of this packet
+ * @has_grh: true if the header has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP for this packet
+ *
+ * This is called from ipath_qp_rcv() to process an incoming RC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
+{
+ struct ipath_other_headers *ohdr;
+ u32 opcode;
+ u32 hdrsize;
+ u32 psn;
+ u32 pad;
+ unsigned long flags;
+ struct ib_wc wc;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ int diff;
+ struct ib_reth *reth;
+ int header_in_data;
+
+ /* Check for GRH */
+ if (!has_grh) {
+ ohdr = &hdr->u.oth;
+ hdrsize = 8 + 12; /* LRH + BTH */
+ psn = be32_to_cpu(ohdr->bth[2]);
+ header_in_data = 0;
+ } else {
+ ohdr = &hdr->u.l.oth;
+ hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
+ /*
+ * The header with GRH is 60 bytes and the core driver sets
+ * the eager header buffer size to 56 bytes so the last 4
+ * bytes of the BTH header (PSN) is in the data buffer.
+ */
+ header_in_data =
+ ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
+ if (header_in_data) {
+ psn = be32_to_cpu(((__be32 *) data)[0]);
+ data += sizeof(__be32);
+ } else
+ psn = be32_to_cpu(ohdr->bth[2]);
+ }
+ /*
+ * The opcode is in the low byte when it's in network order
+ * (top byte when in host order).
+ */
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+
+ /*
+ * Process responses (ACKs) before anything else. Note that the
+ * packet sequence number will be for something in the send work
+ * queue rather than the expected receive packet sequence number.
+ * In other words, this QP is the requester.
+ */
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
+ hdrsize, pmtu, header_in_data);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+
+ /* Compute 24 bits worth of difference. */
+ diff = ipath_cmp24(psn, qp->r_psn);
+ if (unlikely(diff)) {
+ if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
+ psn, diff, header_in_data))
+ goto done;
+ goto resched;
+ }
+
+ /* Check for opcode sequence errors. */
+ switch (qp->r_state) {
+ case OP(SEND_FIRST):
+ case OP(SEND_MIDDLE):
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+ break;
+ nack_inv:
+ /*
+ * A NAK will ACK earlier sends and RDMA writes. Don't queue the
+ * NAK if a RDMA read, atomic, or NAK is pending though.
+ */
+ spin_lock(&qp->s_lock);
+ if (qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
+ qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
+ spin_unlock(&qp->s_lock);
+ goto done;
+ }
+ /* XXX Flush WQEs */
+ qp->state = IB_QPS_ERR;
+ qp->s_ack_state = OP(SEND_ONLY);
+ qp->s_nak_state = IB_NAK_INVALID_REQUEST;
+ qp->s_ack_psn = qp->r_psn;
+ goto resched;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_MIDDLE):
+ if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ break;
+ goto nack_inv;
+
+ case OP(RDMA_READ_REQUEST):
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD):
+ /*
+ * Drop all new requests until a response has been sent. A
+ * new request then ACKs the RDMA response we sent. Relaxed
+ * ordering would allow new requests to be processed but we
+ * would need to keep a queue of rwqe's for all that are in
+ * progress. Note that we can't RNR NAK this request since
+ * the RDMA READ or atomic response is already queued to be
+ * sent (unless we implement a response send queue).
+ */
+ goto done;
+
+ default:
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ goto nack_inv;
+ break;
+ }
+
+ wc.imm_data = 0;
+ wc.wc_flags = 0;
+
+ /* OK, process the packet. */
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ if (!ipath_get_rwqe(qp, 0)) {
+ rnr_nak:
+ /*
+ * A RNR NAK will ACK earlier sends and RDMA writes.
+ * Don't queue the NAK if a RDMA read or atomic
+ * is pending though.
+ */
+ spin_lock(&qp->s_lock);
+ if (qp->s_ack_state >=
+ OP(RDMA_READ_REQUEST) &&
+ qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
+ spin_unlock(&qp->s_lock);
+ goto done;
+ }
+ qp->s_ack_state = OP(SEND_ONLY);
+ qp->s_nak_state = IB_RNR_NAK | qp->s_min_rnr_timer;
+ qp->s_ack_psn = qp->r_psn;
+ goto resched;
+ }
+ qp->r_rcv_len = 0;
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ case OP(RDMA_WRITE_MIDDLE):
+ send_middle:
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto nack_inv;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto nack_inv;
+ ipath_copy_sge(&qp->r_sge, data, pmtu);
+ break;
+
+ case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+ /* consume RWQE */
+ if (!ipath_get_rwqe(qp, 1))
+ goto rnr_nak;
+ goto send_last_imm;
+
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+ if (!ipath_get_rwqe(qp, 0))
+ goto rnr_nak;
+ qp->r_rcv_len = 0;
+ if (opcode == OP(SEND_ONLY))
+ goto send_last;
+ /* FALLTHROUGH */
+ case OP(SEND_LAST_WITH_IMMEDIATE):
+ send_last_imm:
+ if (header_in_data) {
+ wc.imm_data = *(__be32 *) data;
+ data += sizeof(__be32);
+ } else {
+ /* Immediate data comes after BTH */
+ wc.imm_data = ohdr->u.imm_data;
+ }
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ /* FALLTHROUGH */
+ case OP(SEND_LAST):
+ case OP(RDMA_WRITE_LAST):
+ send_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto nack_inv;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ wc.byte_len = tlen + qp->r_rcv_len;
+ if (unlikely(wc.byte_len > qp->r_len))
+ goto nack_inv;
+ ipath_copy_sge(&qp->r_sge, data, tlen);
+ atomic_inc(&qp->msn);
+ if (opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_ONLY))
+ break;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.vendor_err = 0;
+ wc.qp_num = qp->ibqp.qp_num;
+ wc.src_qp = qp->remote_qpn;
+ wc.pkey_index = 0;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ /* Signal completion event if the solicited bit is set. */
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ __constant_cpu_to_be32(1 << 23)) != 0);
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ /* consume RWQE */
+ /* RETH comes after BTH */
+ if (!header_in_data)
+ reth = &ohdr->u.rc.reth;
+ else {
+ reth = (struct ib_reth *)data;
+ data += sizeof(*reth);
+ }
+ hdrsize += sizeof(*reth);
+ qp->r_len = be32_to_cpu(reth->length);
+ qp->r_rcv_len = 0;
+ if (qp->r_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey & NAK */
+ ok = ipath_rkey_ok(dev, &qp->r_sge,
+ qp->r_len, vaddr, rkey,
+ IB_ACCESS_REMOTE_WRITE);
+ if (unlikely(!ok)) {
+ nack_acc:
+ /*
+ * A NAK will ACK earlier sends and RDMA
+ * writes. Don't queue the NAK if a RDMA
+ * read, atomic, or NAK is pending though.
+ */
+ spin_lock(&qp->s_lock);
+ if (qp->s_ack_state >=
+ OP(RDMA_READ_REQUEST) &&
+ qp->s_ack_state !=
+ IB_OPCODE_ACKNOWLEDGE) {
+ spin_unlock(&qp->s_lock);
+ goto done;
+ }
+ /* XXX Flush WQEs */
+ qp->state = IB_QPS_ERR;
+ qp->s_ack_state = OP(RDMA_WRITE_ONLY);
+ qp->s_nak_state =
+ IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->s_ack_psn = qp->r_psn;
+ goto resched;
+ }
+ } else {
+ qp->r_sge.sg_list = NULL;
+ qp->r_sge.sge.mr = NULL;
+ qp->r_sge.sge.vaddr = NULL;
+ qp->r_sge.sge.length = 0;
+ qp->r_sge.sge.sge_length = 0;
+ }
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE)))
+ goto nack_acc;
+ if (opcode == OP(RDMA_WRITE_FIRST))
+ goto send_middle;
+ else if (opcode == OP(RDMA_WRITE_ONLY))
+ goto send_last;
+ if (!ipath_get_rwqe(qp, 1))
+ goto rnr_nak;
+ goto send_last_imm;
+
+ case OP(RDMA_READ_REQUEST):
+ /* RETH comes after BTH */
+ if (!header_in_data)
+ reth = &ohdr->u.rc.reth;
+ else {
+ reth = (struct ib_reth *)data;
+ data += sizeof(*reth);
+ }
+ spin_lock(&qp->s_lock);
+ if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
+ qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
+ spin_unlock(&qp->s_lock);
+ goto done;
+ }
+ qp->s_rdma_len = be32_to_cpu(reth->length);
+ if (qp->s_rdma_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey & NAK */
+ ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
+ qp->s_rdma_len, vaddr, rkey,
+ IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok)) {
+ spin_unlock(&qp->s_lock);
+ goto nack_acc;
+ }
+ /*
+ * Update the next expected PSN. We add 1 later
+ * below, so only add the remainder here.
+ */
+ if (qp->s_rdma_len > pmtu)
+ qp->r_psn += (qp->s_rdma_len - 1) / pmtu;
+ } else {
+ qp->s_rdma_sge.sg_list = NULL;
+ qp->s_rdma_sge.num_sge = 0;
+ qp->s_rdma_sge.sge.mr = NULL;
+ qp->s_rdma_sge.sge.vaddr = NULL;
+ qp->s_rdma_sge.sge.length = 0;
+ qp->s_rdma_sge.sge.sge_length = 0;
+ }
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_READ)))
+ goto nack_acc;
+ /*
+ * We need to increment the MSN here instead of when we
+ * finish sending the result since a duplicate request would
+ * increment it more than once.
+ */
+ atomic_inc(&qp->msn);
+ qp->s_ack_state = opcode;
+ qp->s_nak_state = 0;
+ qp->s_ack_psn = psn;
+ qp->r_psn++;
+ qp->r_state = opcode;
+ goto rdmadone;
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD): {
+ struct ib_atomic_eth *ateth;
+ u64 vaddr;
+ u64 sdata;
+ u32 rkey;
+
+ if (!header_in_data)
+ ateth = &ohdr->u.atomic_eth;
+ else {
+ ateth = (struct ib_atomic_eth *)data;
+ data += sizeof(*ateth);
+ }
+ vaddr = be64_to_cpu(ateth->vaddr);
+ if (unlikely(vaddr & (sizeof(u64) - 1)))
+ goto nack_inv;
+ rkey = be32_to_cpu(ateth->rkey);
+ /* Check rkey & NAK */
+ if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge,
+ sizeof(u64), vaddr, rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_acc;
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_acc;
+ /* Perform atomic OP and save result. */
+ sdata = be64_to_cpu(ateth->swap_data);
+ spin_lock(&dev->pending_lock);
+ qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
+ if (opcode == OP(FETCH_ADD))
+ *(u64 *) qp->r_sge.sge.vaddr =
+ qp->r_atomic_data + sdata;
+ else if (qp->r_atomic_data ==
+ be64_to_cpu(ateth->compare_data))
+ *(u64 *) qp->r_sge.sge.vaddr = sdata;
+ spin_unlock(&dev->pending_lock);
+ atomic_inc(&qp->msn);
+ qp->r_atomic_psn = psn & IPS_PSN_MASK;
+ psn |= 1 << 31;
+ break;
+ }
+
+ default:
+ /* Drop packet for unknown opcodes. */
+ goto done;
+ }
+ qp->r_psn++;
+ qp->r_state = opcode;
+ /* Send an ACK if requested or required. */
+ if (psn & (1 << 31)) {
+ /*
+ * Coalesce ACKs unless there is a RDMA READ or
+ * ATOMIC pending.
+ */
+ spin_lock(&qp->s_lock);
+ if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
+ qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST) {
+ qp->s_ack_state = opcode;
+ qp->s_nak_state = 0;
+ qp->s_ack_psn = psn;
+ qp->s_ack_atomic = qp->r_atomic_data;
+ goto resched;
+ }
+ spin_unlock(&qp->s_lock);
+ }
+done:
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ goto bail;
+
+resched:
+ /*
+ * Try to send ACK right away but not if ipath_do_rc_send() is
+ * active.
+ */
+ if (qp->s_hdrwords == 0 &&
+ (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST ||
+ qp->s_ack_state >= IB_OPCODE_COMPARE_SWAP))
+ send_rc_ack(qp);
+
+rdmadone:
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+
+ /* Call ipath_do_rc_send() in another thread. */
+ tasklet_hi_schedule(&qp->s_task);
+
+bail:
+ return;
+}
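
The receive paths above order packet sequence numbers with ipath_cmp24(), a comparison modulo 2^24 that is defined elsewhere in the driver. The sketch below shows one common way such a comparison is written: shift both 24-bit values to the top of a 32-bit word so the subtraction wraps exactly as the PSN space does. It illustrates the semantics only and is not the driver's implementation.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative 24-bit sequence comparison: returns <0, 0 or >0 depending on
 * whether a precedes, equals or follows b modulo 2^24.
 */
static int cmp24(uint32_t a, uint32_t b)
{
	/* Align the 24-bit PSNs to the top of the 32-bit word so the
	 * difference wraps the same way the PSN space does. */
	return (int32_t)((a << 8) - (b << 8));
}

int main(void)
{
	printf("%d\n", cmp24(0x000001, 0xffffff) > 0);	/* 1: 1 follows 0xffffff */
	printf("%d\n", cmp24(0x000005, 0x000007) < 0);	/* 1: 5 precedes 7 */
	return 0;
}
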
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
new file mode 100644
index 0000000000000..1e59750c5f63d
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -0,0 +1,446 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _IPATH_REGISTERS_H
+#define _IPATH_REGISTERS_H
+
+/*
+ * This file should only be included by kernel source, and by the diags.
+ * It defines the registers, and their contents, for the InfiniPath HT-400 chip
+ */
+
+/*
+ * These are the InfiniPath register and buffer bit definitions
+ * that are visible to software and needed only by the kernel
+ * and diag code. A few that are visible to protocol and user
+ * code are in ipath_common.h. Some bits are specific
+ * to a given chip implementation and have been moved to the
+ * chip-specific source file.
+ */
+
+/* kr_revision bits */
+#define INFINIPATH_R_CHIPREVMINOR_MASK 0xFF
+#define INFINIPATH_R_CHIPREVMINOR_SHIFT 0
+#define INFINIPATH_R_CHIPREVMAJOR_MASK 0xFF
+#define INFINIPATH_R_CHIPREVMAJOR_SHIFT 8
+#define INFINIPATH_R_ARCH_MASK 0xFF
+#define INFINIPATH_R_ARCH_SHIFT 16
+#define INFINIPATH_R_SOFTWARE_MASK 0xFF
+#define INFINIPATH_R_SOFTWARE_SHIFT 24
+#define INFINIPATH_R_BOARDID_MASK 0xFF
+#define INFINIPATH_R_BOARDID_SHIFT 32
+
+/* kr_control bits */
+#define INFINIPATH_C_FREEZEMODE 0x00000002
+#define INFINIPATH_C_LINKENABLE 0x00000004
+#define INFINIPATH_C_RESET 0x00000001
+
+/* kr_sendctrl bits */
+#define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
+
+#define IPATH_S_ABORT 0
+#define IPATH_S_PIOINTBUFAVAIL 1
+#define IPATH_S_PIOBUFAVAILUPD 2
+#define IPATH_S_PIOENABLE 3
+#define IPATH_S_DISARM 31
+
+#define INFINIPATH_S_ABORT (1U << IPATH_S_ABORT)
+#define INFINIPATH_S_PIOINTBUFAVAIL (1U << IPATH_S_PIOINTBUFAVAIL)
+#define INFINIPATH_S_PIOBUFAVAILUPD (1U << IPATH_S_PIOBUFAVAILUPD)
+#define INFINIPATH_S_PIOENABLE (1U << IPATH_S_PIOENABLE)
+#define INFINIPATH_S_DISARM (1U << IPATH_S_DISARM)
+
+/* kr_rcvctrl bits */
+#define INFINIPATH_R_PORTENABLE_SHIFT 0
+#define INFINIPATH_R_INTRAVAIL_SHIFT 16
+#define INFINIPATH_R_TAILUPD 0x80000000
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define INFINIPATH_I_RCVURG_SHIFT 0
+#define INFINIPATH_I_RCVAVAIL_SHIFT 12
+#define INFINIPATH_I_ERROR 0x80000000
+#define INFINIPATH_I_SPIOSENT 0x40000000
+#define INFINIPATH_I_SPIOBUFAVAIL 0x20000000
+#define INFINIPATH_I_GPIO 0x10000000
+
+/* kr_errorstatus, kr_errorclear, kr_errormask bits */
+#define INFINIPATH_E_RFORMATERR 0x0000000000000001ULL
+#define INFINIPATH_E_RVCRC 0x0000000000000002ULL
+#define INFINIPATH_E_RICRC 0x0000000000000004ULL
+#define INFINIPATH_E_RMINPKTLEN 0x0000000000000008ULL
+#define INFINIPATH_E_RMAXPKTLEN 0x0000000000000010ULL
+#define INFINIPATH_E_RLONGPKTLEN 0x0000000000000020ULL
+#define INFINIPATH_E_RSHORTPKTLEN 0x0000000000000040ULL
+#define INFINIPATH_E_RUNEXPCHAR 0x0000000000000080ULL
+#define INFINIPATH_E_RUNSUPVL 0x0000000000000100ULL
+#define INFINIPATH_E_REBP 0x0000000000000200ULL
+#define INFINIPATH_E_RIBFLOW 0x0000000000000400ULL
+#define INFINIPATH_E_RBADVERSION 0x0000000000000800ULL
+#define INFINIPATH_E_RRCVEGRFULL 0x0000000000001000ULL
+#define INFINIPATH_E_RRCVHDRFULL 0x0000000000002000ULL
+#define INFINIPATH_E_RBADTID 0x0000000000004000ULL
+#define INFINIPATH_E_RHDRLEN 0x0000000000008000ULL
+#define INFINIPATH_E_RHDR 0x0000000000010000ULL
+#define INFINIPATH_E_RIBLOSTLINK 0x0000000000020000ULL
+#define INFINIPATH_E_SMINPKTLEN 0x0000000020000000ULL
+#define INFINIPATH_E_SMAXPKTLEN 0x0000000040000000ULL
+#define INFINIPATH_E_SUNDERRUN 0x0000000080000000ULL
+#define INFINIPATH_E_SPKTLEN 0x0000000100000000ULL
+#define INFINIPATH_E_SDROPPEDSMPPKT 0x0000000200000000ULL
+#define INFINIPATH_E_SDROPPEDDATAPKT 0x0000000400000000ULL
+#define INFINIPATH_E_SPIOARMLAUNCH 0x0000000800000000ULL
+#define INFINIPATH_E_SUNEXPERRPKTNUM 0x0000001000000000ULL
+#define INFINIPATH_E_SUNSUPVL 0x0000002000000000ULL
+#define INFINIPATH_E_IBSTATUSCHANGED 0x0001000000000000ULL
+#define INFINIPATH_E_INVALIDADDR 0x0002000000000000ULL
+#define INFINIPATH_E_RESET 0x0004000000000000ULL
+#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus bits */
+/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
+ * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: eagerTID, 3: expTID
+ * bit 4: flag buffer, 5: datainfo, 6: header info */
+#define INFINIPATH_HWE_TXEMEMPARITYERR_MASK 0xFULL
+#define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40
+#define INFINIPATH_HWE_RXEMEMPARITYERR_MASK 0x7FULL
+#define INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT 44
+#define INFINIPATH_HWE_RXDSYNCMEMPARITYERR 0x0000000400000000ULL
+#define INFINIPATH_HWE_MEMBISTFAILED 0x0040000000000000ULL
+#define INFINIPATH_HWE_IBCBUSTOSPCPARITYERR 0x4000000000000000ULL
+#define INFINIPATH_HWE_IBCBUSFRSPCPARITYERR 0x8000000000000000ULL
+
+/* kr_hwdiagctrl bits */
+#define INFINIPATH_DC_FORCETXEMEMPARITYERR_MASK 0xFULL
+#define INFINIPATH_DC_FORCETXEMEMPARITYERR_SHIFT 40
+#define INFINIPATH_DC_FORCERXEMEMPARITYERR_MASK 0x7FULL
+#define INFINIPATH_DC_FORCERXEMEMPARITYERR_SHIFT 44
+#define INFINIPATH_DC_FORCERXDSYNCMEMPARITYERR 0x0000000400000000ULL
+#define INFINIPATH_DC_COUNTERDISABLE 0x1000000000000000ULL
+#define INFINIPATH_DC_COUNTERWREN 0x2000000000000000ULL
+#define INFINIPATH_DC_FORCEIBCBUSTOSPCPARITYERR 0x4000000000000000ULL
+#define INFINIPATH_DC_FORCEIBCBUSFRSPCPARITYERR 0x8000000000000000ULL
+
+/* kr_ibcctrl bits */
+#define INFINIPATH_IBCC_FLOWCTRLPERIOD_MASK 0xFFULL
+#define INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT 0
+#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_MASK 0xFFULL
+#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8
+#define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL
+#define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1
+#define INFINIPATH_IBCC_LINKINITCMD_POLL 2 /* cycle through TS1/TS2 till OK */
+#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 /* wait for TS1, then go on */
+#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
+#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
+#define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */
+#define INFINIPATH_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
+#define INFINIPATH_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
+#define INFINIPATH_IBCC_LINKCMD_SHIFT 18
+#define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL
+#define INFINIPATH_IBCC_MAXPKTLEN_SHIFT 20
+#define INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK 0xFULL
+#define INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT 32
+#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK 0xFULL
+#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT 36
+#define INFINIPATH_IBCC_CREDITSCALE_MASK 0x7ULL
+#define INFINIPATH_IBCC_CREDITSCALE_SHIFT 40
+#define INFINIPATH_IBCC_LOOPBACK 0x8000000000000000ULL
+#define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL
+
+/* kr_ibcstatus bits */
+#define INFINIPATH_IBCS_LINKTRAININGSTATE_MASK 0xF
+#define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0
+#define INFINIPATH_IBCS_LINKSTATE_MASK 0x7
+#define INFINIPATH_IBCS_LINKSTATE_SHIFT 4
+#define INFINIPATH_IBCS_TXREADY 0x40000000
+#define INFINIPATH_IBCS_TXCREDITOK 0x80000000
+/* link training states (shift by INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
+#define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00
+#define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01
+#define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02
+#define INFINIPATH_IBCS_LT_STATE_POLLQUIET 0x03
+#define INFINIPATH_IBCS_LT_STATE_SLEEPDELAY 0x04
+#define INFINIPATH_IBCS_LT_STATE_SLEEPQUIET 0x05
+#define INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE 0x08
+#define INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG 0x09
+#define INFINIPATH_IBCS_LT_STATE_CFGWAITRMT 0x0a
+#define INFINIPATH_IBCS_LT_STATE_CFGIDLE 0x0b
+#define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN 0x0c
+#define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT 0x0e
+#define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE 0x0f
+/* link state machine states (shift by INFINIPATH_IBCS_LINKSTATE_SHIFT) */
+#define INFINIPATH_IBCS_L_STATE_DOWN 0x0
+#define INFINIPATH_IBCS_L_STATE_INIT 0x1
+#define INFINIPATH_IBCS_L_STATE_ARM 0x2
+#define INFINIPATH_IBCS_L_STATE_ACTIVE 0x3
+#define INFINIPATH_IBCS_L_STATE_ACT_DEFER 0x4
+
+/* combination link status states that we use with some frequency */
+#define IPATH_IBSTATE_MASK ((INFINIPATH_IBCS_LINKTRAININGSTATE_MASK \
+ << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
+ (INFINIPATH_IBCS_LINKSTATE_MASK \
+ <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
+#define IPATH_IBSTATE_INIT ((INFINIPATH_IBCS_L_STATE_INIT \
+ << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
+ (INFINIPATH_IBCS_LT_STATE_LINKUP \
+ <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
+#define IPATH_IBSTATE_ARM ((INFINIPATH_IBCS_L_STATE_ARM \
+ << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
+ (INFINIPATH_IBCS_LT_STATE_LINKUP \
+ <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
+#define IPATH_IBSTATE_ACTIVE ((INFINIPATH_IBCS_L_STATE_ACTIVE \
+ << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
+ (INFINIPATH_IBCS_LT_STATE_LINKUP \
+ <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
+
+/* kr_extstatus bits */
+#define INFINIPATH_EXTS_SERDESPLLLOCK 0x1
+#define INFINIPATH_EXTS_GPIOIN_MASK 0xFFFFULL
+#define INFINIPATH_EXTS_GPIOIN_SHIFT 48
+
+/* kr_extctrl bits */
+#define INFINIPATH_EXTC_GPIOINVERT_MASK 0xFFFFULL
+#define INFINIPATH_EXTC_GPIOINVERT_SHIFT 32
+#define INFINIPATH_EXTC_GPIOOE_MASK 0xFFFFULL
+#define INFINIPATH_EXTC_GPIOOE_SHIFT 48
+#define INFINIPATH_EXTC_SERDESENABLE 0x80000000ULL
+#define INFINIPATH_EXTC_SERDESCONNECT 0x40000000ULL
+#define INFINIPATH_EXTC_SERDESENTRUNKING 0x20000000ULL
+#define INFINIPATH_EXTC_SERDESDISRXFIFO 0x10000000ULL
+#define INFINIPATH_EXTC_SERDESENPLPBK1 0x08000000ULL
+#define INFINIPATH_EXTC_SERDESENPLPBK2 0x04000000ULL
+#define INFINIPATH_EXTC_SERDESENENCDEC 0x02000000ULL
+#define INFINIPATH_EXTC_LED1SECPORT_ON 0x00000020ULL
+#define INFINIPATH_EXTC_LED2SECPORT_ON 0x00000010ULL
+#define INFINIPATH_EXTC_LED1PRIPORT_ON 0x00000008ULL
+#define INFINIPATH_EXTC_LED2PRIPORT_ON 0x00000004ULL
+#define INFINIPATH_EXTC_LEDGBLOK_ON 0x00000002ULL
+#define INFINIPATH_EXTC_LEDGBLERR_OFF 0x00000001ULL
+
+/* kr_mdio bits */
+#define INFINIPATH_MDIO_CLKDIV_MASK 0x7FULL
+#define INFINIPATH_MDIO_CLKDIV_SHIFT 32
+#define INFINIPATH_MDIO_COMMAND_MASK 0x7ULL
+#define INFINIPATH_MDIO_COMMAND_SHIFT 26
+#define INFINIPATH_MDIO_DEVADDR_MASK 0x1FULL
+#define INFINIPATH_MDIO_DEVADDR_SHIFT 21
+#define INFINIPATH_MDIO_REGADDR_MASK 0x1FULL
+#define INFINIPATH_MDIO_REGADDR_SHIFT 16
+#define INFINIPATH_MDIO_DATA_MASK 0xFFFFULL
+#define INFINIPATH_MDIO_DATA_SHIFT 0
+#define INFINIPATH_MDIO_CMDVALID 0x0000000040000000ULL
+#define INFINIPATH_MDIO_RDDATAVALID 0x0000000080000000ULL
+
+/* kr_partitionkey bits */
+#define INFINIPATH_PKEY_SIZE 16
+#define INFINIPATH_PKEY_MASK 0xFFFF
+#define INFINIPATH_PKEY_DEFAULT_PKEY 0xFFFF
+
+/* kr_serdesconfig0 bits */
+#define INFINIPATH_SERDC0_RESET_MASK 0xfULL /* overall reset bits */
+#define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */
+#define INFINIPATH_SERDC0_TXIDLE 0xF000ULL /* tx idle enables (per lane) */
+#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL /* rx detect enables (per lane) */
+#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL /* L1 Power down; use with RXDETECT,
+ otherwise not used on IB side */
+
+/* kr_xgxsconfig bits */
+#define INFINIPATH_XGXS_RESET 0x7ULL
+#define INFINIPATH_XGXS_MDIOADDR_MASK 0xfULL
+#define INFINIPATH_XGXS_MDIOADDR_SHIFT 4
+
+#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */
+
+/* TID entries (memory), HT400-only */
+#define INFINIPATH_RT_VALID 0x8000000000000000ULL
+#define INFINIPATH_RT_ADDR_SHIFT 0
+#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFF
+#define INFINIPATH_RT_BUFSIZE_SHIFT 48
+
+/*
+ * IPATH_PIO_MAXIBHDR is the max IB header size allowed for in our
+ * PIO send buffers. This is well beyond anything currently
+ * defined in the InfiniBand spec.
+ */
+#define IPATH_PIO_MAXIBHDR 128
+
+typedef u64 ipath_err_t;
+
+/* mask of defined bits for various registers */
+extern u64 infinipath_i_bitsextant;
+extern ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant;
+
+/* masks that are different in various chips, or only exist in some chips */
+extern u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask;
+
+/*
+ * register bits for selecting i2c direction and values, used for I2C serial
+ * flash
+ */
+extern u16 ipath_gpio_sda_num, ipath_gpio_scl_num;
+extern u64 ipath_gpio_sda, ipath_gpio_scl;
+
+/*
+ * These are the infinipath general register numbers (not offsets).
+ * The kernel registers are used directly; those beyond the kernel
+ * registers are calculated from one of the base registers. The use of
+ * an integer type doesn't allow type-checking as thorough as, say,
+ * an enum but allows for better hiding of chip differences.
+ */
+typedef const u16 ipath_kreg, /* infinipath general registers */
+ ipath_creg, /* infinipath counter registers */
+ ipath_sreg; /* kernel-only, infinipath send registers */
+
+/*
+ * These are the chip registers common to all infinipath chips, and
+ * used both by the kernel and the diagnostics or other user code.
+ * They are all implemented such that 64 bit accesses work.
+ * Some implement no more than 32 bits. Because 64 bit reads
+ * require 2 HT cmds on opteron, we access those with 32 bit
+ * reads for efficiency (they are written as 64 bits, since
+ * the extra 32 bits are nearly free on writes, and it slightly reduces
+ * complexity). The rest are all accessed as 64 bits.
+ */
+struct ipath_kregs {
+ /* These are the 32 bit group */
+ ipath_kreg kr_control;
+ ipath_kreg kr_counterregbase;
+ ipath_kreg kr_intmask;
+ ipath_kreg kr_intstatus;
+ ipath_kreg kr_pagealign;
+ ipath_kreg kr_portcnt;
+ ipath_kreg kr_rcvtidbase;
+ ipath_kreg kr_rcvtidcnt;
+ ipath_kreg kr_rcvegrbase;
+ ipath_kreg kr_rcvegrcnt;
+ ipath_kreg kr_scratch;
+ ipath_kreg kr_sendctrl;
+ ipath_kreg kr_sendpiobufbase;
+ ipath_kreg kr_sendpiobufcnt;
+ ipath_kreg kr_sendpiosize;
+ ipath_kreg kr_sendregbase;
+ ipath_kreg kr_userregbase;
+ /* These are the 64 bit group */
+ ipath_kreg kr_debugport;
+ ipath_kreg kr_debugportselect;
+ ipath_kreg kr_errorclear;
+ ipath_kreg kr_errormask;
+ ipath_kreg kr_errorstatus;
+ ipath_kreg kr_extctrl;
+ ipath_kreg kr_extstatus;
+ ipath_kreg kr_gpio_clear;
+ ipath_kreg kr_gpio_mask;
+ ipath_kreg kr_gpio_out;
+ ipath_kreg kr_gpio_status;
+ ipath_kreg kr_hwdiagctrl;
+ ipath_kreg kr_hwerrclear;
+ ipath_kreg kr_hwerrmask;
+ ipath_kreg kr_hwerrstatus;
+ ipath_kreg kr_ibcctrl;
+ ipath_kreg kr_ibcstatus;
+ ipath_kreg kr_intblocked;
+ ipath_kreg kr_intclear;
+ ipath_kreg kr_interruptconfig;
+ ipath_kreg kr_mdio;
+ ipath_kreg kr_partitionkey;
+ ipath_kreg kr_rcvbthqp;
+ ipath_kreg kr_rcvbufbase;
+ ipath_kreg kr_rcvbufsize;
+ ipath_kreg kr_rcvctrl;
+ ipath_kreg kr_rcvhdrcnt;
+ ipath_kreg kr_rcvhdrentsize;
+ ipath_kreg kr_rcvhdrsize;
+ ipath_kreg kr_rcvintmembase;
+ ipath_kreg kr_rcvintmemsize;
+ ipath_kreg kr_revision;
+ ipath_kreg kr_sendbuffererror;
+ ipath_kreg kr_sendpioavailaddr;
+ ipath_kreg kr_serdesconfig0;
+ ipath_kreg kr_serdesconfig1;
+ ipath_kreg kr_serdesstatus;
+ ipath_kreg kr_txintmembase;
+ ipath_kreg kr_txintmemsize;
+ ipath_kreg kr_xgxsconfig;
+ ipath_kreg kr_ibpllcfg;
+ /* use these two (and the following N ports) only with ipath_k*_kreg64_port();
+ * not *kreg64() */
+ ipath_kreg kr_rcvhdraddr;
+ ipath_kreg kr_rcvhdrtailaddr;
+
+ /* remaining registers are not present on all types of infinipath chips */
+ ipath_kreg kr_rcvpktledcnt;
+ ipath_kreg kr_pcierbuftestreg0;
+ ipath_kreg kr_pcierbuftestreg1;
+ ipath_kreg kr_pcieq0serdesconfig0;
+ ipath_kreg kr_pcieq0serdesconfig1;
+ ipath_kreg kr_pcieq0serdesstatus;
+ ipath_kreg kr_pcieq1serdesconfig0;
+ ipath_kreg kr_pcieq1serdesconfig1;
+ ipath_kreg kr_pcieq1serdesstatus;
+};
+
+struct ipath_cregs {
+ ipath_creg cr_badformatcnt;
+ ipath_creg cr_erricrccnt;
+ ipath_creg cr_errlinkcnt;
+ ipath_creg cr_errlpcrccnt;
+ ipath_creg cr_errpkey;
+ ipath_creg cr_errrcvflowctrlcnt;
+ ipath_creg cr_err_rlencnt;
+ ipath_creg cr_errslencnt;
+ ipath_creg cr_errtidfull;
+ ipath_creg cr_errtidvalid;
+ ipath_creg cr_errvcrccnt;
+ ipath_creg cr_ibstatuschange;
+ ipath_creg cr_intcnt;
+ ipath_creg cr_invalidrlencnt;
+ ipath_creg cr_invalidslencnt;
+ ipath_creg cr_lbflowstallcnt;
+ ipath_creg cr_iblinkdowncnt;
+ ipath_creg cr_iblinkerrrecovcnt;
+ ipath_creg cr_ibsymbolerrcnt;
+ ipath_creg cr_pktrcvcnt;
+ ipath_creg cr_pktrcvflowctrlcnt;
+ ipath_creg cr_pktsendcnt;
+ ipath_creg cr_pktsendflowcnt;
+ ipath_creg cr_portovflcnt;
+ ipath_creg cr_rcvebpcnt;
+ ipath_creg cr_rcvovflcnt;
+ ipath_creg cr_rxdroppktcnt;
+ ipath_creg cr_senddropped;
+ ipath_creg cr_sendstallcnt;
+ ipath_creg cr_sendunderruncnt;
+ ipath_creg cr_unsupvlcnt;
+ ipath_creg cr_wordrcvcnt;
+ ipath_creg cr_wordsendcnt;
+};
+
+#endif /* _IPATH_REGISTERS_H */
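
The *_MASK/*_SHIFT pairs in this header are always used the same way: shift the register value right by the field's shift, then AND with the mask. A small stand-alone illustration using the kr_revision fields follows; the #defines are copied from the header above and the register value is made up.

#include <stdint.h>
#include <stdio.h>

/* Definitions copied from ipath_registers.h above. */
#define INFINIPATH_R_CHIPREVMINOR_MASK 0xFF
#define INFINIPATH_R_CHIPREVMINOR_SHIFT 0
#define INFINIPATH_R_CHIPREVMAJOR_MASK 0xFF
#define INFINIPATH_R_CHIPREVMAJOR_SHIFT 8

int main(void)
{
	uint64_t revision = 0x0000000000000201ULL;	/* made-up register value */

	/* Extract major.minor chip revision: shift right, then mask. */
	printf("chip rev %llu.%llu\n",
	       (unsigned long long)((revision >> INFINIPATH_R_CHIPREVMAJOR_SHIFT) &
				    INFINIPATH_R_CHIPREVMAJOR_MASK),
	       (unsigned long long)((revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT) &
				    INFINIPATH_R_CHIPREVMINOR_MASK));
	return 0;
}
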
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
new file mode 100644
index 0000000000000..f232e77b78eef
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -0,0 +1,552 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ipath_verbs.h"
+
+/*
+ * Convert the AETH RNR timeout code into the number of milliseconds.
+ */
+const u32 ib_ipath_rnr_table[32] = {
+ 656, /* 0 */
+ 1, /* 1 */
+ 1, /* 2 */
+ 1, /* 3 */
+ 1, /* 4 */
+ 1, /* 5 */
+ 1, /* 6 */
+ 1, /* 7 */
+ 1, /* 8 */
+ 1, /* 9 */
+ 1, /* A */
+ 1, /* B */
+ 1, /* C */
+ 1, /* D */
+ 2, /* E */
+ 2, /* F */
+ 3, /* 10 */
+ 4, /* 11 */
+ 6, /* 12 */
+ 8, /* 13 */
+ 11, /* 14 */
+ 16, /* 15 */
+ 21, /* 16 */
+ 31, /* 17 */
+ 41, /* 18 */
+ 62, /* 19 */
+ 82, /* 1A */
+ 123, /* 1B */
+ 164, /* 1C */
+ 246, /* 1D */
+ 328, /* 1E */
+ 492 /* 1F */
+};
+
+/**
+ * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
+ * @qp: the QP
+ *
+ * XXX Use a simple list for now. We might need a priority
+ * queue if we have lots of QPs waiting for RNR timeouts
+ * but that should be rare.
+ */
+void ipath_insert_rnr_queue(struct ipath_qp *qp)
+{
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ if (list_empty(&dev->rnrwait))
+ list_add(&qp->timerwait, &dev->rnrwait);
+ else {
+ struct list_head *l = &dev->rnrwait;
+ struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
+ timerwait);
+
+ while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
+ qp->s_rnr_timeout -= nqp->s_rnr_timeout;
+ l = l->next;
+ if (l->next == &dev->rnrwait)
+ break;
+ nqp = list_entry(l->next, struct ipath_qp,
+ timerwait);
+ }
+ list_add(&qp->timerwait, l);
+ }
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+}
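
The running subtraction above stores each queued QP's RNR timeout relative to the entries ahead of it, the classic delta-queue arrangement in which a periodic timer only needs to decrement the head of the list. A stand-alone sketch of the idea, with plain integers standing in for QPs:

#include <stdio.h>

#define NQ 4

/*
 * Delta-list illustration: absolute timeouts are converted to increments
 * over the predecessor, so expiring entries only requires decrementing the
 * first element of the list.
 */
int main(void)
{
	int absolute[NQ] = { 3, 8, 8, 15 };	/* must be sorted */
	int delta[NQ];
	int i;

	for (i = 0; i < NQ; i++)
		delta[i] = absolute[i] - (i ? absolute[i - 1] : 0);

	for (i = 0; i < NQ; i++)
		printf("entry %d: waits %d tick(s) after the previous one\n",
		       i, delta[i]);
	return 0;
}
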
+
+/**
+ * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
+ * @qp: the QP
+ * @wr_id_only: update wr_id only, not SGEs
+ *
+ * Return 0 if no RWQE is available, otherwise return 1.
+ *
+ * Called at interrupt level with the QP r_rq.lock held.
+ */
+int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
+{
+ struct ipath_rq *rq;
+ struct ipath_srq *srq;
+ struct ipath_rwqe *wqe;
+ int ret;
+
+ if (!qp->ibqp.srq) {
+ rq = &qp->r_rq;
+ if (unlikely(rq->tail == rq->head)) {
+ ret = 0;
+ goto bail;
+ }
+ wqe = get_rwqe_ptr(rq, rq->tail);
+ qp->r_wr_id = wqe->wr_id;
+ if (!wr_id_only) {
+ qp->r_sge.sge = wqe->sg_list[0];
+ qp->r_sge.sg_list = wqe->sg_list + 1;
+ qp->r_sge.num_sge = wqe->num_sge;
+ qp->r_len = wqe->length;
+ }
+ if (++rq->tail >= rq->size)
+ rq->tail = 0;
+ ret = 1;
+ goto bail;
+ }
+
+ srq = to_isrq(qp->ibqp.srq);
+ rq = &srq->rq;
+ spin_lock(&rq->lock);
+ if (unlikely(rq->tail == rq->head)) {
+ spin_unlock(&rq->lock);
+ ret = 0;
+ goto bail;
+ }
+ wqe = get_rwqe_ptr(rq, rq->tail);
+ qp->r_wr_id = wqe->wr_id;
+ if (!wr_id_only) {
+ qp->r_sge.sge = wqe->sg_list[0];
+ qp->r_sge.sg_list = wqe->sg_list + 1;
+ qp->r_sge.num_sge = wqe->num_sge;
+ qp->r_len = wqe->length;
+ }
+ if (++rq->tail >= rq->size)
+ rq->tail = 0;
+ if (srq->ibsrq.event_handler) {
+ struct ib_event ev;
+ u32 n;
+
+ if (rq->head < rq->tail)
+ n = rq->size + rq->head - rq->tail;
+ else
+ n = rq->head - rq->tail;
+ if (n < srq->limit) {
+ srq->limit = 0;
+ spin_unlock(&rq->lock);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ srq->ibsrq.event_handler(&ev,
+ srq->ibsrq.srq_context);
+ } else
+ spin_unlock(&rq->lock);
+ } else
+ spin_unlock(&rq->lock);
+ ret = 1;
+
+bail:
+ return ret;
+}
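
The head/tail arithmetic here (and in the SRQ code later in this patch) is ordinary circular-buffer bookkeeping: the queue is empty when tail == head, and the number of posted entries is head - tail, corrected by the ring size once head has wrapped. A tiny stand-alone sketch of the occupancy calculation:

#include <stdio.h>

/* Circular-buffer occupancy: how many entries sit between tail and head. */
static unsigned ring_count(unsigned head, unsigned tail, unsigned size)
{
	return head >= tail ? head - tail : size + head - tail;
}

int main(void)
{
	printf("%u\n", ring_count(5, 2, 8));	/* 3 entries posted */
	printf("%u\n", ring_count(1, 6, 8));	/* 3: head wrapped past the end */
	return 0;
}
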
+
+/**
+ * ipath_ruc_loopback - handle UC and RC loopback requests
+ * @sqp: the loopback QP
+ * @wc: the work completion entry
+ *
+ * This is called from ipath_do_uc_send() or ipath_do_rc_send() to
+ * forward a WQE addressed to the same HCA.
+ * Note that although we are single threaded due to the tasklet, we still
+ * have to protect against post_send(). We don't have to worry about
+ * receive interrupts since this is a connected protocol and all packets
+ * will pass through here.
+ */
+void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc)
+{
+ struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
+ struct ipath_qp *qp;
+ struct ipath_swqe *wqe;
+ struct ipath_sge *sge;
+ unsigned long flags;
+ u64 sdata;
+
+ qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
+ if (!qp) {
+ dev->n_pkt_drops++;
+ return;
+ }
+
+again:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+
+ if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) {
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+ goto done;
+ }
+
+ /* Get the next send request. */
+ if (sqp->s_last == sqp->s_head) {
+ /* Send work queue is empty. */
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+ goto done;
+ }
+
+ /*
+ * We can rely on the entry not changing without the s_lock
+ * being held until we update s_last.
+ */
+ wqe = get_swqe_ptr(sqp, sqp->s_last);
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+
+ wc->wc_flags = 0;
+ wc->imm_data = 0;
+
+ sqp->s_sge.sge = wqe->sg_list[0];
+ sqp->s_sge.sg_list = wqe->sg_list + 1;
+ sqp->s_sge.num_sge = wqe->wr.num_sge;
+ sqp->s_len = wqe->length;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->imm_data = wqe->wr.imm_data;
+ /* FALLTHROUGH */
+ case IB_WR_SEND:
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+ if (!ipath_get_rwqe(qp, 0)) {
+ rnr_nak:
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ /* Handle RNR NAK */
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ goto send_comp;
+ if (sqp->s_rnr_retry == 0) {
+ wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto err;
+ }
+ if (sqp->s_rnr_retry_cnt < 7)
+ sqp->s_rnr_retry--;
+ dev->n_rnr_naks++;
+ sqp->s_rnr_timeout =
+ ib_ipath_rnr_table[sqp->s_min_rnr_timer];
+ ipath_insert_rnr_queue(sqp);
+ goto done;
+ }
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->imm_data = wqe->wr.imm_data;
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+ if (!ipath_get_rwqe(qp, 1))
+ goto rnr_nak;
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ /* FALLTHROUGH */
+ case IB_WR_RDMA_WRITE:
+ if (wqe->length == 0)
+ break;
+ if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, wqe->length,
+ wqe->wr.wr.rdma.remote_addr,
+ wqe->wr.wr.rdma.rkey,
+ IB_ACCESS_REMOTE_WRITE))) {
+ acc_err:
+ wc->status = IB_WC_REM_ACCESS_ERR;
+ err:
+ wc->wr_id = wqe->wr.wr_id;
+ wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+ wc->vendor_err = 0;
+ wc->byte_len = 0;
+ wc->qp_num = sqp->ibqp.qp_num;
+ wc->src_qp = sqp->remote_qpn;
+ wc->pkey_index = 0;
+ wc->slid = sqp->remote_ah_attr.dlid;
+ wc->sl = sqp->remote_ah_attr.sl;
+ wc->dlid_path_bits = 0;
+ wc->port_num = 0;
+ ipath_sqerror_qp(sqp, wc);
+ goto done;
+ }
+ break;
+
+ case IB_WR_RDMA_READ:
+ if (unlikely(!ipath_rkey_ok(dev, &sqp->s_sge, wqe->length,
+ wqe->wr.wr.rdma.remote_addr,
+ wqe->wr.wr.rdma.rkey,
+ IB_ACCESS_REMOTE_READ)))
+ goto acc_err;
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_READ)))
+ goto acc_err;
+ qp->r_sge.sge = wqe->sg_list[0];
+ qp->r_sge.sg_list = wqe->sg_list + 1;
+ qp->r_sge.num_sge = wqe->wr.num_sge;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, sizeof(u64),
+ wqe->wr.wr.rdma.remote_addr,
+ wqe->wr.wr.rdma.rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto acc_err;
+ /* Perform atomic OP and save result. */
+ sdata = wqe->wr.wr.atomic.swap;
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
+ if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+ *(u64 *) qp->r_sge.sge.vaddr =
+ qp->r_atomic_data + sdata;
+ else if (qp->r_atomic_data == wqe->wr.wr.atomic.compare_add)
+ *(u64 *) qp->r_sge.sge.vaddr = sdata;
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+ *(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data;
+ goto send_comp;
+
+ default:
+ goto done;
+ }
+
+ sge = &sqp->s_sge.sge;
+ while (sqp->s_len) {
+ u32 len = sqp->s_len;
+
+ if (len > sge->length)
+ len = sge->length;
+ BUG_ON(len == 0);
+ ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--sqp->s_sge.num_sge)
+ *sge = *sqp->s_sge.sg_list++;
+ } else if (sge->length == 0 && sge->mr != NULL) {
+ if (++sge->n >= IPATH_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ sqp->s_len -= len;
+ }
+
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
+ wqe->wr.opcode == IB_WR_RDMA_READ)
+ goto send_comp;
+
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc->opcode = IB_WC_RECV;
+ wc->wr_id = qp->r_wr_id;
+ wc->status = IB_WC_SUCCESS;
+ wc->vendor_err = 0;
+ wc->byte_len = wqe->length;
+ wc->qp_num = qp->ibqp.qp_num;
+ wc->src_qp = qp->remote_qpn;
+ /* XXX do we know which pkey matched? Only needed for GSI. */
+ wc->pkey_index = 0;
+ wc->slid = qp->remote_ah_attr.dlid;
+ wc->sl = qp->remote_ah_attr.sl;
+ wc->dlid_path_bits = 0;
+ /* Signal completion event if the solicited bit is set. */
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
+ wqe->wr.send_flags & IB_SEND_SOLICITED);
+
+send_comp:
+ sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
+
+ if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+ wc->wr_id = wqe->wr.wr_id;
+ wc->status = IB_WC_SUCCESS;
+ wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+ wc->vendor_err = 0;
+ wc->byte_len = wqe->length;
+ wc->qp_num = sqp->ibqp.qp_num;
+ wc->src_qp = 0;
+ wc->pkey_index = 0;
+ wc->slid = 0;
+ wc->sl = 0;
+ wc->dlid_path_bits = 0;
+ wc->port_num = 0;
+ ipath_cq_enter(to_icq(sqp->ibqp.send_cq), wc, 0);
+ }
+
+ /* Update s_last now that we are finished with the SWQE */
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ if (++sqp->s_last >= sqp->s_size)
+ sqp->s_last = 0;
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+ goto again;
+
+done:
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
+
+/**
+ * ipath_no_bufs_available - tell the layer driver we need buffers
+ * @qp: the QP that caused the problem
+ * @dev: the device we ran out of buffers on
+ *
+ * Called when we run out of PIO buffers.
+ */
+void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ if (qp->piowait.next == LIST_POISON1)
+ list_add_tail(&qp->piowait, &dev->piowait);
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+ /*
+ * Note that as soon as ipath_layer_want_buffer() is called and
+ * possibly before it returns, ipath_ib_piobufavail()
+ * could be called. If we are still in the tasklet function,
+ * tasklet_hi_schedule() will not call us until the next time
+ * tasklet_hi_schedule() is called.
+ * We clear the tasklet flag now since we are committing to return
+ * from the tasklet function.
+ */
+ clear_bit(IPATH_S_BUSY, &qp->s_flags);
+ tasklet_unlock(&qp->s_task);
+ ipath_layer_want_buffer(dev->dd);
+ dev->n_piowait++;
+}
+
+/**
+ * ipath_post_rc_send - post RC and UC sends
+ * @qp: the QP to post on
+ * @wr: the work request to send
+ */
+int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
+{
+ struct ipath_swqe *wqe;
+ unsigned long flags;
+ u32 next;
+ int i, j;
+ int acc;
+ int ret;
+
+ /*
+ * Don't allow RDMA reads or atomic operations on UC, and
+ * reject undefined operations.
+ * Make sure buffer is large enough to hold the result for atomics.
+ */
+ if (qp->ibqp.qp_type == IB_QPT_UC) {
+ if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
+ ret = -EINVAL;
+ goto bail;
+ } else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
+ (wr->num_sge == 0 ||
+ wr->sg_list[0].length < sizeof(u64) ||
+ wr->sg_list[0].addr & (sizeof(u64) - 1))) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ /* IB spec says that num_sge == 0 is OK. */
+ if (wr->num_sge > qp->s_max_sge) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ spin_lock_irqsave(&qp->s_lock, flags);
+ next = qp->s_head + 1;
+ if (next >= qp->s_size)
+ next = 0;
+ if (next == qp->s_last) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ wqe = get_swqe_ptr(qp, qp->s_head);
+ wqe->wr = *wr;
+ wqe->ssn = qp->s_ssn++;
+ wqe->sg_list[0].mr = NULL;
+ wqe->sg_list[0].vaddr = NULL;
+ wqe->sg_list[0].length = 0;
+ wqe->sg_list[0].sge_length = 0;
+ wqe->length = 0;
+ acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0;
+ for (i = 0, j = 0; i < wr->num_sge; i++) {
+ if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (wr->sg_list[i].length == 0)
+ continue;
+ if (!ipath_lkey_ok(&to_idev(qp->ibqp.device)->lk_table,
+ &wqe->sg_list[j], &wr->sg_list[i],
+ acc)) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ ret = -EINVAL;
+ goto bail;
+ }
+ wqe->length += wr->sg_list[i].length;
+ j++;
+ }
+ wqe->wr.num_sge = j;
+ qp->s_head = next;
+ /*
+ * Wake up the send tasklet if the QP is not waiting
+ * for an RNR timeout.
+ */
+ next = qp->s_rnr_timeout;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ if (next == 0) {
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ ipath_do_uc_send((unsigned long) qp);
+ else
+ ipath_do_rc_send((unsigned long) qp);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
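
Note that ipath_post_rc_send() advances s_head before comparing it with s_last, so the send ring deliberately leaves one slot unused: "next == last" means full while "head == last" still means empty. A minimal sketch of that discipline under assumed names (ring_post() and RING_SIZE are illustrative, not driver symbols):

#include <stdio.h>

#define RING_SIZE 4	/* usable capacity is RING_SIZE - 1 */

static unsigned head, last;	/* producer and consumer indices */

/* Returns 0 on success, -1 if the ring is full (one slot is kept empty). */
static int ring_post(int value)
{
	unsigned next = head + 1;

	if (next >= RING_SIZE)
		next = 0;
	if (next == last)
		return -1;	/* full */
	/* ... a real ring would store 'value' at slot 'head' here ... */
	(void)value;
	head = next;
	return 0;
}

int main(void)
{
	int posted = 0;

	while (ring_post(posted) == 0)
		posted++;
	printf("posted %d entries before the ring filled\n", posted);	/* 3 */
	return 0;
}
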
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
new file mode 100644
index 0000000000000..01c4c6c56118a
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+
+#include "ipath_verbs.h"
+
+/**
+ * ipath_post_srq_receive - post a receive on a shared receive queue
+ * @ibsrq: the SRQ to post the receive on
+ * @wr: the list of work requests to post
+ * @bad_wr: the first WR to cause a problem is put here
+ *
+ * This may be called from interrupt context.
+ */
+int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct ipath_srq *srq = to_isrq(ibsrq);
+ struct ipath_ibdev *dev = to_idev(ibsrq->device);
+ unsigned long flags;
+ int ret;
+
+ for (; wr; wr = wr->next) {
+ struct ipath_rwqe *wqe;
+ u32 next;
+ int i, j;
+
+ if (wr->num_sge > srq->rq.max_sge) {
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&srq->rq.lock, flags);
+ next = srq->rq.head + 1;
+ if (next >= srq->rq.size)
+ next = 0;
+ if (next == srq->rq.tail) {
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ wqe = get_rwqe_ptr(&srq->rq, srq->rq.head);
+ wqe->wr_id = wr->wr_id;
+ wqe->sg_list[0].mr = NULL;
+ wqe->sg_list[0].vaddr = NULL;
+ wqe->sg_list[0].length = 0;
+ wqe->sg_list[0].sge_length = 0;
+ wqe->length = 0;
+ for (i = 0, j = 0; i < wr->num_sge; i++) {
+ /* Check LKEY */
+ if (to_ipd(srq->ibsrq.pd)->user &&
+ wr->sg_list[i].lkey == 0) {
+ spin_unlock_irqrestore(&srq->rq.lock,
+ flags);
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (wr->sg_list[i].length == 0)
+ continue;
+ if (!ipath_lkey_ok(&dev->lk_table,
+ &wqe->sg_list[j],
+ &wr->sg_list[i],
+ IB_ACCESS_LOCAL_WRITE)) {
+ spin_unlock_irqrestore(&srq->rq.lock,
+ flags);
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+ wqe->length += wr->sg_list[i].length;
+ j++;
+ }
+ wqe->num_sge = j;
+ srq->rq.head = next;
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_create_srq - create a shared receive queue
+ * @ibpd: the protection domain of the SRQ to create
+ * @attr: the attributes of the SRQ
+ * @udata: not used by the InfiniPath verbs driver
+ */
+struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct ipath_srq *srq;
+ u32 sz;
+ struct ib_srq *ret;
+
+ if (srq_init_attr->attr.max_sge < 1) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ /*
+ * Need to use vmalloc() if we want to support large #s of entries.
+ */
+ srq->rq.size = srq_init_attr->attr.max_wr + 1;
+ sz = sizeof(struct ipath_sge) * srq_init_attr->attr.max_sge +
+ sizeof(struct ipath_rwqe);
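+ /* Each entry is an ipath_rwqe header followed by max_sge SGEs; the +1 slot lets head == tail mean "empty". */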
+ srq->rq.wq = vmalloc(srq->rq.size * sz);
+ if (!srq->rq.wq) {
+ kfree(srq);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ /*
+ * ib_create_srq() will initialize srq->ibsrq.
+ */
+ spin_lock_init(&srq->rq.lock);
+ srq->rq.head = 0;
+ srq->rq.tail = 0;
+ srq->rq.max_sge = srq_init_attr->attr.max_sge;
+ srq->limit = srq_init_attr->attr.srq_limit;
+
+ ret = &srq->ibsrq;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_modify_srq - modify a shared receive queue
+ * @ibsrq: the SRQ to modify
+ * @attr: the new attributes of the SRQ
+ * @attr_mask: indicates which attributes to modify
+ */
+int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask)
+{
+ struct ipath_srq *srq = to_isrq(ibsrq);
+ unsigned long flags;
+ int ret;
+
+ if (attr_mask & IB_SRQ_LIMIT) {
+ spin_lock_irqsave(&srq->rq.lock, flags);
+ srq->limit = attr->srq_limit;
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ }
+ if (attr_mask & IB_SRQ_MAX_WR) {
+ u32 size = attr->max_wr + 1;
+ struct ipath_rwqe *wq, *p;
+ u32 n;
+ u32 sz;
+
+ if (attr->max_sge < srq->rq.max_sge) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ sz = sizeof(struct ipath_rwqe) +
+ attr->max_sge * sizeof(struct ipath_sge);
+ wq = vmalloc(size * sz);
+ if (!wq) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&srq->rq.lock, flags);
+ if (srq->rq.head < srq->rq.tail)
+ n = srq->rq.size + srq->rq.head - srq->rq.tail;
+ else
+ n = srq->rq.head - srq->rq.tail;
+ if (size <= n || size <= srq->limit) {
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ vfree(wq);
+ ret = -EINVAL;
+ goto bail;
+ }
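+ /* Copy the outstanding WQEs from the old ring to the start of the new one. */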
+ n = 0;
+ p = wq;
+ while (srq->rq.tail != srq->rq.head) {
+ struct ipath_rwqe *wqe;
+ int i;
+
+ wqe = get_rwqe_ptr(&srq->rq, srq->rq.tail);
+ p->wr_id = wqe->wr_id;
+ p->length = wqe->length;
+ p->num_sge = wqe->num_sge;
+ for (i = 0; i < wqe->num_sge; i++)
+ p->sg_list[i] = wqe->sg_list[i];
+ n++;
+ p = (struct ipath_rwqe *)((char *) p + sz);
+ if (++srq->rq.tail >= srq->rq.size)
+ srq->rq.tail = 0;
+ }
+ vfree(srq->rq.wq);
+ srq->rq.wq = wq;
+ srq->rq.size = size;
+ srq->rq.head = n;
+ srq->rq.tail = 0;
+ srq->rq.max_sge = attr->max_sge;
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct ipath_srq *srq = to_isrq(ibsrq);
+
+ attr->max_wr = srq->rq.size - 1;
+ attr->max_sge = srq->rq.max_sge;
+ attr->srq_limit = srq->limit;
+ return 0;
+}
+
+/**
+ * ipath_destroy_srq - destroy a shared receive queue
+ * @ibsrq: the SRQ to destroy
+ */
+int ipath_destroy_srq(struct ib_srq *ibsrq)
+{
+ struct ipath_srq *srq = to_isrq(ibsrq);
+
+ vfree(srq->rq.wq);
+ kfree(srq);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
new file mode 100644
index 0000000000000..fe209137ee74f
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+
+#include "ipath_kernel.h"
+
+struct infinipath_stats ipath_stats;
+
+/**
+ * ipath_snap_cntr - snapshot a chip counter
+ * @dd: the infinipath device
+ * @creg: the counter to snapshot
+ *
+ * called from add_timer and user counter read calls, to deal with
+ * counters that wrap in "human time". The words sent and received, and
+ * the packets sent and received are all that we worry about. For now,
+ * at least, we don't worry about error counters, because if they wrap
+ * that quickly, we probably don't care. We may eventually just make this
+ * handle all the counters. word counters can wrap in about 20 seconds
+ * of full bandwidth traffic, packet counters in a few hours.
+ */
+
+u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
+{
+ u32 val, reg64 = 0;
+ u64 val64;
+ unsigned long t0, t1;
+ u64 ret;
+
+ t0 = jiffies;
+ /* If the fast-incrementing counters are only 32 bits, snapshot them,
+ * and maintain them as 64-bit values in the driver */
+ if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
+ (creg == dd->ipath_cregs->cr_wordsendcnt ||
+ creg == dd->ipath_cregs->cr_wordrcvcnt ||
+ creg == dd->ipath_cregs->cr_pktsendcnt ||
+ creg == dd->ipath_cregs->cr_pktrcvcnt)) {
+ val64 = ipath_read_creg(dd, creg);
+ val = val64 == ~0ULL ? ~0U : 0;
+ reg64 = 1;
+ } else /* val64 just to keep gcc quiet... */
+ val64 = val = ipath_read_creg32(dd, creg);
+ /*
+ * See if a second has passed. This is just a way to detect things
+ * that are quite broken. Normally this should take just a few
+ * cycles (the check is for long enough that we don't care if we get
+ * preempted).  An Opteron HT I/O read timeout is 4 seconds with
+ * normal NB values.
+ */
+ t1 = jiffies;
+ if (time_before(t0 + HZ, t1) && val == -1) {
+ ipath_dev_err(dd, "Error! Read counter 0x%x timed out\n",
+ creg);
+ ret = 0ULL;
+ goto bail;
+ }
+ if (reg64) {
+ ret = val64;
+ goto bail;
+ }
+
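+ /*
+ * Fold the 32-bit hardware snapshot into the driver's 64-bit
+ * running totals for the fast-wrapping word/packet counters.
+ */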
+ if (creg == dd->ipath_cregs->cr_wordsendcnt) {
+ if (val != dd->ipath_lastsword) {
+ dd->ipath_sword += val - dd->ipath_lastsword;
+ dd->ipath_lastsword = val;
+ }
+ val64 = dd->ipath_sword;
+ } else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
+ if (val != dd->ipath_lastrword) {
+ dd->ipath_rword += val - dd->ipath_lastrword;
+ dd->ipath_lastrword = val;
+ }
+ val64 = dd->ipath_rword;
+ } else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
+ if (val != dd->ipath_lastspkts) {
+ dd->ipath_spkts += val - dd->ipath_lastspkts;
+ dd->ipath_lastspkts = val;
+ }
+ val64 = dd->ipath_spkts;
+ } else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
+ if (val != dd->ipath_lastrpkts) {
+ dd->ipath_rpkts += val - dd->ipath_lastrpkts;
+ dd->ipath_lastrpkts = val;
+ }
+ val64 = dd->ipath_rpkts;
+ } else
+ val64 = (u64) val;
+
+ ret = val64;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
+ * @dd: the infinipath device
+ *
+ * Print the delta of egrfull/hdrqfull errors for kernel ports no more than
+ * every 5 seconds.  User processes are printed at close, but the kernel
+ * doesn't close, so...  This is a separate routine so it may be called from
+ * other places someday, and so the function name is meaningful when printed
+ * by _IPATH_INFO.
+ */
+static void ipath_qcheck(struct ipath_devdata *dd)
+{
+ static u64 last_tot_hdrqfull;
+ size_t blen = 0;
+ char buf[128];
+
+ *buf = 0;
+ if (dd->ipath_pd[0]->port_hdrqfull != dd->ipath_p0_hdrqfull) {
+ blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
+ dd->ipath_pd[0]->port_hdrqfull -
+ dd->ipath_p0_hdrqfull);
+ dd->ipath_p0_hdrqfull = dd->ipath_pd[0]->port_hdrqfull;
+ }
+ if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
+ blen += snprintf(buf + blen, sizeof buf - blen,
+ "%srcvegrfull %llu",
+ blen ? ", " : "",
+ (unsigned long long)
+ (ipath_stats.sps_etidfull -
+ dd->ipath_last_tidfull));
+ dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
+ }
+
+ /*
+ * this is actually the number of hdrq full interrupts, not actual
+ * events, but at the moment that's mostly what I'm interested in.
+ * Actual count, etc. is in the counters, if needed. For production
+ * users this won't ordinarily be printed.
+ */
+
+ if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
+ ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
+ blen += snprintf(buf + blen, sizeof buf - blen,
+ "%shdrqfull %llu (all ports)",
+ blen ? ", " : "",
+ (unsigned long long)
+ (ipath_stats.sps_hdrqfull -
+ last_tot_hdrqfull));
+ last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
+ }
+ if (blen)
+ ipath_dbg("%s\n", buf);
+
+ if (dd->ipath_port0head != (u32)
+ le64_to_cpu(*dd->ipath_hdrqtailptr)) {
+ if (dd->ipath_lastport0rcv_cnt ==
+ ipath_stats.sps_port0pkts) {
+ ipath_cdbg(PKT, "missing rcv interrupts? "
+ "port0 hd=%llx tl=%x; port0pkts %llx\n",
+ (unsigned long long)
+ le64_to_cpu(*dd->ipath_hdrqtailptr),
+ dd->ipath_port0head,
+ (unsigned long long)
+ ipath_stats.sps_port0pkts);
+ ipath_kreceive(dd);
+ }
+ dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
+ }
+}
+
+/**
+ * ipath_get_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the infinipath device ipath_devdata
+ *
+ * called from add_timer
+ */
+void ipath_get_faststats(unsigned long opaque)
+{
+ struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
+ u32 val;
+ static unsigned cnt;
+
+ /*
+ * don't access the chip while running diags, or memory diags can
+ * fail
+ */
+ if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT) ||
+ ipath_diag_inuse)
+ /* but re-arm the timer, for the diags case; won't hurt the others */
+ goto done;
+
+ if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+ }
+
+ ipath_qcheck(dd);
+
+ /*
+ * deal with repeat error suppression. Doesn't really matter if
+ * last error was almost a full interval ago, or just a few usecs
+ * ago; still won't get more than 2 per interval. We may want
+ * longer intervals for this eventually, could do with mod, counter
+ * or separate timer. Also see code in ipath_handle_errors() and
+ * ipath_handle_hwerrors().
+ */
+
+ if (dd->ipath_lasterror)
+ dd->ipath_lasterror = 0;
+ if (dd->ipath_lasthwerror)
+ dd->ipath_lasthwerror = 0;
+ if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs)
+ && time_after(jiffies, dd->ipath_unmasktime)) {
+ char ebuf[256];
+ ipath_decode_err(ebuf, sizeof ebuf,
+ (dd->ipath_maskederrs & ~dd->
+ ipath_ignorederrs));
+ if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
+ ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
+ ipath_dev_err(dd, "Re-enabling masked errors "
+ "(%s)\n", ebuf);
+ else {
+ /*
+ * rcvegrfull and rcvhdrqfull are "normal", for some
+ * types of processes (mostly benchmarks) that send
+ * huge numbers of messages, while not processing
+ * them. So only complain about these at debug
+ * level.
+ */
+ ipath_dbg("Disabling frequent queue full errors "
+ "(%s)\n", ebuf);
+ }
+ dd->ipath_maskederrs = dd->ipath_ignorederrs;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
+ ~dd->ipath_maskederrs);
+ }
+
+ /* limit qfull messages to ~one per minute per port */
+ if ((++cnt & 0x10)) {
+ for (val = dd->ipath_cfgports - 1; ((int)val) >= 0;
+ val--) {
+ if (dd->ipath_lastegrheads[val] != -1)
+ dd->ipath_lastegrheads[val] = -1;
+ if (dd->ipath_lastrcvhdrqtails[val] != -1)
+ dd->ipath_lastrcvhdrqtails[val] = -1;
+ }
+ }
+
+ if (dd->ipath_nosma_bufs) {
+ dd->ipath_nosma_secs += 5;
+ if (dd->ipath_nosma_secs >= 30) {
+ ipath_cdbg(SMA, "No SMA bufs avail %u seconds; "
+ "cancelling pending sends\n",
+ dd->ipath_nosma_secs);
+ /*
+ * issue an abort as well, in case we have a packet
+ * stuck in launch fifo. This could corrupt an
+ * outgoing user packet in the worst case,
+ * but this situation is pretty catastrophic anyway.
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ INFINIPATH_S_ABORT);
+ ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
+ dd->ipath_piobcnt2k +
+ dd->ipath_piobcnt4k -
+ dd->ipath_lastport_piobuf);
+ /* start again, if necessary */
+ dd->ipath_nosma_secs = 0;
+ } else
+ ipath_cdbg(SMA, "No SMA bufs avail %u tries, "
+ "after %u seconds\n",
+ dd->ipath_nosma_bufs,
+ dd->ipath_nosma_secs);
+ }
+
+done:
+ mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
new file mode 100644
index 0000000000000..32acd8048b499
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -0,0 +1,778 @@
+/*
+ * Copyright (c) 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ctype.h>
+#include <linux/pci.h>
+
+#include "ipath_kernel.h"
+#include "ips_common.h"
+#include "ipath_layer.h"
+
+/**
+ * ipath_parse_ushort - parse an unsigned short value in an arbitrary base
+ * @str: the string containing the number
+ * @valp: where to put the result
+ *
+ * returns the number of bytes consumed, or negative value on error
+ */
+int ipath_parse_ushort(const char *str, unsigned short *valp)
+{
+ unsigned long val;
+ char *end;
+ int ret;
+
+ if (!isdigit(str[0])) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ val = simple_strtoul(str, &end, 0);
+
+ if (val > 0xffff) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ *valp = val;
+
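+ /* Consumed length counts one character past the last digit (e.g. the trailing newline on sysfs writes). */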
+ ret = end + 1 - str;
+ if (ret == 0)
+ ret = -EINVAL;
+
+bail:
+ return ret;
+}
+
+static ssize_t show_version(struct device_driver *dev, char *buf)
+{
+ /* The string printed here is already newline-terminated. */
+ return scnprintf(buf, PAGE_SIZE, "%s", ipath_core_version);
+}
+
+static ssize_t show_num_units(struct device_driver *dev, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ ipath_count_units(NULL, NULL, NULL));
+}
+
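+ /*
+ * Generate a read-only driver-level sysfs attribute that prints one
+ * field of the global ipath_stats structure.
+ */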
+#define DRIVER_STAT(name, attr) \
+ static ssize_t show_stat_##name(struct device_driver *dev, \
+ char *buf) \
+ { \
+ return scnprintf( \
+ buf, PAGE_SIZE, "%llu\n", \
+ (unsigned long long) ipath_stats.sps_ ##attr); \
+ } \
+ static DRIVER_ATTR(name, S_IRUGO, show_stat_##name, NULL)
+
+DRIVER_STAT(intrs, ints);
+DRIVER_STAT(err_intrs, errints);
+DRIVER_STAT(errs, errs);
+DRIVER_STAT(pkt_errs, pkterrs);
+DRIVER_STAT(crc_errs, crcerrs);
+DRIVER_STAT(hw_errs, hwerrs);
+DRIVER_STAT(ib_link, iblink);
+DRIVER_STAT(port0_pkts, port0pkts);
+DRIVER_STAT(ether_spkts, ether_spkts);
+DRIVER_STAT(ether_rpkts, ether_rpkts);
+DRIVER_STAT(sma_spkts, sma_spkts);
+DRIVER_STAT(sma_rpkts, sma_rpkts);
+DRIVER_STAT(hdrq_full, hdrqfull);
+DRIVER_STAT(etid_full, etidfull);
+DRIVER_STAT(no_piobufs, nopiobufs);
+DRIVER_STAT(ports, ports);
+DRIVER_STAT(pkey0, pkeys[0]);
+DRIVER_STAT(pkey1, pkeys[1]);
+DRIVER_STAT(pkey2, pkeys[2]);
+DRIVER_STAT(pkey3, pkeys[3]);
+/* XXX fix the following when dynamic table of devices used */
+DRIVER_STAT(lid0, lid[0]);
+DRIVER_STAT(lid1, lid[1]);
+DRIVER_STAT(lid2, lid[2]);
+DRIVER_STAT(lid3, lid[3]);
+
+DRIVER_STAT(nports, nports);
+DRIVER_STAT(null_intr, nullintr);
+DRIVER_STAT(max_pkts_call, maxpkts_call);
+DRIVER_STAT(avg_pkts_call, avgpkts_call);
+DRIVER_STAT(page_locks, pagelocks);
+DRIVER_STAT(page_unlocks, pageunlocks);
+DRIVER_STAT(krdrops, krdrops);
+/* XXX fix the following when dynamic table of devices used */
+DRIVER_STAT(mlid0, mlid[0]);
+DRIVER_STAT(mlid1, mlid[1]);
+DRIVER_STAT(mlid2, mlid[2]);
+DRIVER_STAT(mlid3, mlid[3]);
+
+static struct attribute *driver_stat_attributes[] = {
+ &driver_attr_intrs.attr,
+ &driver_attr_err_intrs.attr,
+ &driver_attr_errs.attr,
+ &driver_attr_pkt_errs.attr,
+ &driver_attr_crc_errs.attr,
+ &driver_attr_hw_errs.attr,
+ &driver_attr_ib_link.attr,
+ &driver_attr_port0_pkts.attr,
+ &driver_attr_ether_spkts.attr,
+ &driver_attr_ether_rpkts.attr,
+ &driver_attr_sma_spkts.attr,
+ &driver_attr_sma_rpkts.attr,
+ &driver_attr_hdrq_full.attr,
+ &driver_attr_etid_full.attr,
+ &driver_attr_no_piobufs.attr,
+ &driver_attr_ports.attr,
+ &driver_attr_pkey0.attr,
+ &driver_attr_pkey1.attr,
+ &driver_attr_pkey2.attr,
+ &driver_attr_pkey3.attr,
+ &driver_attr_lid0.attr,
+ &driver_attr_lid1.attr,
+ &driver_attr_lid2.attr,
+ &driver_attr_lid3.attr,
+ &driver_attr_nports.attr,
+ &driver_attr_null_intr.attr,
+ &driver_attr_max_pkts_call.attr,
+ &driver_attr_avg_pkts_call.attr,
+ &driver_attr_page_locks.attr,
+ &driver_attr_page_unlocks.attr,
+ &driver_attr_krdrops.attr,
+ &driver_attr_mlid0.attr,
+ &driver_attr_mlid1.attr,
+ &driver_attr_mlid2.attr,
+ &driver_attr_mlid3.attr,
+ NULL
+};
+
+static struct attribute_group driver_stat_attr_group = {
+ .name = "stats",
+ .attrs = driver_stat_attributes
+};
+
+static ssize_t show_status(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ if (!dd->ipath_statusp) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
+ (unsigned long long) *(dd->ipath_statusp));
+
+bail:
+ return ret;
+}
+
+static const char *ipath_status_str[] = {
+ "Initted",
+ "Disabled",
+ "Admin_Disabled",
+ "OIB_SMA",
+ "SMA",
+ "Present",
+ "IB_link_up",
+ "IB_configured",
+ "NoIBcable",
+ "Fatal_Hardware_Error",
+ NULL,
+};
+
+static ssize_t show_status_str(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ int i, any;
+ u64 s;
+ ssize_t ret;
+
+ if (!dd->ipath_statusp) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ s = *(dd->ipath_statusp);
+ *buf = '\0';
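+ /* Walk the status bits from LSB up, appending the name of each set bit, space separated. */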
+ for (any = i = 0; s && ipath_status_str[i]; i++) {
+ if (s & 1) {
+ if (any && strlcat(buf, " ", PAGE_SIZE) >=
+ PAGE_SIZE)
+ /* overflow */
+ break;
+ if (strlcat(buf, ipath_status_str[i],
+ PAGE_SIZE) >= PAGE_SIZE)
+ break;
+ any = 1;
+ }
+ s >>= 1;
+ }
+ if (any)
+ strlcat(buf, "\n", PAGE_SIZE);
+
+ ret = strlen(buf);
+
+bail:
+ return ret;
+}
+
+static ssize_t show_boardversion(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ /* The string printed here is already newline-terminated. */
+ return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion);
+}
+
+static ssize_t show_lid(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_lid);
+}
+
+static ssize_t store_lid(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ u16 lid;
+ int ret;
+
+ ret = ipath_parse_ushort(buf, &lid);
+ if (ret < 0)
+ goto invalid;
+
+ if (lid == 0 || lid >= 0xc000) {
+ ret = -EINVAL;
+ goto invalid;
+ }
+
+ ipath_set_sps_lid(dd, lid, 0);
+
+ goto bail;
+invalid:
+ ipath_dev_err(dd, "attempt to set invalid LID\n");
+bail:
+ return ret;
+}
+
+static ssize_t show_mlid(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_mlid);
+}
+
+static ssize_t store_mlid(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ int unit;
+ u16 mlid;
+ int ret;
+
+ ret = ipath_parse_ushort(buf, &mlid);
+ if (ret < 0)
+ goto invalid;
+
+ unit = dd->ipath_unit;
+
+ dd->ipath_mlid = mlid;
+ ipath_stats.sps_mlid[unit] = mlid;
+ ipath_layer_intr(dd, IPATH_LAYER_INT_BCAST);
+
+ goto bail;
+invalid:
+ ipath_dev_err(dd, "attempt to set invalid MLID\n");
+bail:
+ return ret;
+}
+
+static ssize_t show_guid(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ u8 *guid;
+
+ guid = (u8 *) & (dd->ipath_guid);
+
+ return scnprintf(buf, PAGE_SIZE,
+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ guid[0], guid[1], guid[2], guid[3],
+ guid[4], guid[5], guid[6], guid[7]);
+}
+
+static ssize_t store_guid(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ ssize_t ret;
+ unsigned short guid[8];
+ __be64 nguid;
+ u8 *ng;
+ int i;
+
+ if (sscanf(buf, "%hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx",
+ &guid[0], &guid[1], &guid[2], &guid[3],
+ &guid[4], &guid[5], &guid[6], &guid[7]) != 8)
+ goto invalid;
+
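+ /* Store the eight parsed bytes in the order given, matching the big-endian on-wire GUID layout. */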
+ ng = (u8 *) &nguid;
+
+ for (i = 0; i < 8; i++) {
+ if (guid[i] > 0xff)
+ goto invalid;
+ ng[i] = guid[i];
+ }
+
+ dd->ipath_guid = nguid;
+ dd->ipath_nguid = 1;
+
+ ret = strlen(buf);
+ goto bail;
+
+invalid:
+ ipath_dev_err(dd, "attempt to set invalid GUID\n");
+ ret = -EINVAL;
+
+bail:
+ return ret;
+}
+
+static ssize_t show_nguid(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_nguid);
+}
+
+static ssize_t show_serial(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+
+ buf[sizeof dd->ipath_serial] = '\0';
+ memcpy(buf, dd->ipath_serial, sizeof dd->ipath_serial);
+ strcat(buf, "\n");
+ return strlen(buf);
+}
+
+static ssize_t show_unit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_unit);
+}
+
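+ /*
+ * Generate a per-device sysfs attribute that reads one hardware counter,
+ * located by its offset within struct infinipath_counters.
+ */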
+#define DEVICE_COUNTER(name, attr) \
+ static ssize_t show_counter_##name(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ struct ipath_devdata *dd = dev_get_drvdata(dev); \
+ return scnprintf(\
+ buf, PAGE_SIZE, "%llu\n", (unsigned long long) \
+ ipath_snap_cntr( \
+ dd, offsetof(struct infinipath_counters, \
+ attr) / sizeof(u64))); \
+ } \
+ static DEVICE_ATTR(name, S_IRUGO, show_counter_##name, NULL);
+
+DEVICE_COUNTER(ib_link_downeds, IBLinkDownedCnt);
+DEVICE_COUNTER(ib_link_err_recoveries, IBLinkErrRecoveryCnt);
+DEVICE_COUNTER(ib_status_changes, IBStatusChangeCnt);
+DEVICE_COUNTER(ib_symbol_errs, IBSymbolErrCnt);
+DEVICE_COUNTER(lb_flow_stalls, LBFlowStallCnt);
+DEVICE_COUNTER(lb_ints, LBIntCnt);
+DEVICE_COUNTER(rx_bad_formats, RxBadFormatCnt);
+DEVICE_COUNTER(rx_buf_ovfls, RxBufOvflCnt);
+DEVICE_COUNTER(rx_data_pkts, RxDataPktCnt);
+DEVICE_COUNTER(rx_dropped_pkts, RxDroppedPktCnt);
+DEVICE_COUNTER(rx_dwords, RxDwordCnt);
+DEVICE_COUNTER(rx_ebps, RxEBPCnt);
+DEVICE_COUNTER(rx_flow_ctrl_errs, RxFlowCtrlErrCnt);
+DEVICE_COUNTER(rx_flow_pkts, RxFlowPktCnt);
+DEVICE_COUNTER(rx_icrc_errs, RxICRCErrCnt);
+DEVICE_COUNTER(rx_len_errs, RxLenErrCnt);
+DEVICE_COUNTER(rx_link_problems, RxLinkProblemCnt);
+DEVICE_COUNTER(rx_lpcrc_errs, RxLPCRCErrCnt);
+DEVICE_COUNTER(rx_max_min_len_errs, RxMaxMinLenErrCnt);
+DEVICE_COUNTER(rx_p0_hdr_egr_ovfls, RxP0HdrEgrOvflCnt);
+DEVICE_COUNTER(rx_p1_hdr_egr_ovfls, RxP1HdrEgrOvflCnt);
+DEVICE_COUNTER(rx_p2_hdr_egr_ovfls, RxP2HdrEgrOvflCnt);
+DEVICE_COUNTER(rx_p3_hdr_egr_ovfls, RxP3HdrEgrOvflCnt);
+DEVICE_COUNTER(rx_p4_hdr_egr_ovfls, RxP4HdrEgrOvflCnt);
+DEVICE_COUNTER(rx_p5_hdr_egr_ovfls, RxP5HdrEgrOvflCnt);
+DEVICE_COUNTER(rx_p6_hdr_egr_ovfls, RxP6HdrEgrOvflCnt);
+DEVICE_COUNTER(rx_p7_hdr_egr_ovfls, RxP7HdrEgrOvflCnt);
+DEVICE_COUNTER(rx_p8_hdr_egr_ovfls, RxP8HdrEgrOvflCnt);
+DEVICE_COUNTER(rx_pkey_mismatches, RxPKeyMismatchCnt);
+DEVICE_COUNTER(rx_tid_full_errs, RxTIDFullErrCnt);
+DEVICE_COUNTER(rx_tid_valid_errs, RxTIDValidErrCnt);
+DEVICE_COUNTER(rx_vcrc_errs, RxVCRCErrCnt);
+DEVICE_COUNTER(tx_data_pkts, TxDataPktCnt);
+DEVICE_COUNTER(tx_dropped_pkts, TxDroppedPktCnt);
+DEVICE_COUNTER(tx_dwords, TxDwordCnt);
+DEVICE_COUNTER(tx_flow_pkts, TxFlowPktCnt);
+DEVICE_COUNTER(tx_flow_stalls, TxFlowStallCnt);
+DEVICE_COUNTER(tx_len_errs, TxLenErrCnt);
+DEVICE_COUNTER(tx_max_min_len_errs, TxMaxMinLenErrCnt);
+DEVICE_COUNTER(tx_underruns, TxUnderrunCnt);
+DEVICE_COUNTER(tx_unsup_vl_errs, TxUnsupVLErrCnt);
+
+static struct attribute *dev_counter_attributes[] = {
+ &dev_attr_ib_link_downeds.attr,
+ &dev_attr_ib_link_err_recoveries.attr,
+ &dev_attr_ib_status_changes.attr,
+ &dev_attr_ib_symbol_errs.attr,
+ &dev_attr_lb_flow_stalls.attr,
+ &dev_attr_lb_ints.attr,
+ &dev_attr_rx_bad_formats.attr,
+ &dev_attr_rx_buf_ovfls.attr,
+ &dev_attr_rx_data_pkts.attr,
+ &dev_attr_rx_dropped_pkts.attr,
+ &dev_attr_rx_dwords.attr,
+ &dev_attr_rx_ebps.attr,
+ &dev_attr_rx_flow_ctrl_errs.attr,
+ &dev_attr_rx_flow_pkts.attr,
+ &dev_attr_rx_icrc_errs.attr,
+ &dev_attr_rx_len_errs.attr,
+ &dev_attr_rx_link_problems.attr,
+ &dev_attr_rx_lpcrc_errs.attr,
+ &dev_attr_rx_max_min_len_errs.attr,
+ &dev_attr_rx_p0_hdr_egr_ovfls.attr,
+ &dev_attr_rx_p1_hdr_egr_ovfls.attr,
+ &dev_attr_rx_p2_hdr_egr_ovfls.attr,
+ &dev_attr_rx_p3_hdr_egr_ovfls.attr,
+ &dev_attr_rx_p4_hdr_egr_ovfls.attr,
+ &dev_attr_rx_p5_hdr_egr_ovfls.attr,
+ &dev_attr_rx_p6_hdr_egr_ovfls.attr,
+ &dev_attr_rx_p7_hdr_egr_ovfls.attr,
+ &dev_attr_rx_p8_hdr_egr_ovfls.attr,
+ &dev_attr_rx_pkey_mismatches.attr,
+ &dev_attr_rx_tid_full_errs.attr,
+ &dev_attr_rx_tid_valid_errs.attr,
+ &dev_attr_rx_vcrc_errs.attr,
+ &dev_attr_tx_data_pkts.attr,
+ &dev_attr_tx_dropped_pkts.attr,
+ &dev_attr_tx_dwords.attr,
+ &dev_attr_tx_flow_pkts.attr,
+ &dev_attr_tx_flow_stalls.attr,
+ &dev_attr_tx_len_errs.attr,
+ &dev_attr_tx_max_min_len_errs.attr,
+ &dev_attr_tx_underruns.attr,
+ &dev_attr_tx_unsup_vl_errs.attr,
+ NULL
+};
+
+static struct attribute_group dev_counter_attr_group = {
+ .name = "counters",
+ .attrs = dev_counter_attributes
+};
+
+static ssize_t store_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ int ret;
+
+ if (count < 5 || memcmp(buf, "reset", 5)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (dd->ipath_flags & IPATH_DISABLED) {
+ /*
+ * post-reset init would re-enable interrupts, etc.,
+ * so don't allow reset on disabled devices.  Not a
+ * perfect error code, but about the best choice.
+ */
+ dev_info(dev, "Unit %d is disabled, can't reset\n",
+ dd->ipath_unit);
+ ret = -EINVAL;
+ goto bail;
+ }
+ ret = ipath_reset_device(dd->ipath_unit);
+bail:
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t store_link_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ int ret, r;
+ u16 state;
+
+ ret = ipath_parse_ushort(buf, &state);
+ if (ret < 0)
+ goto invalid;
+
+ r = ipath_layer_set_linkstate(dd, state);
+ if (r < 0) {
+ ret = r;
+ goto bail;
+ }
+
+ goto bail;
+invalid:
+ ipath_dev_err(dd, "attempt to set invalid link state\n");
+bail:
+ return ret;
+}
+
+static ssize_t show_mtu(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_ibmtu);
+}
+
+static ssize_t store_mtu(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ ssize_t ret;
+ u16 mtu = 0;
+ int r;
+
+ ret = ipath_parse_ushort(buf, &mtu);
+ if (ret < 0)
+ goto invalid;
+
+ r = ipath_layer_set_mtu(dd, mtu);
+ if (r < 0)
+ ret = r;
+
+ goto bail;
+invalid:
+ ipath_dev_err(dd, "attempt to set invalid MTU\n");
+bail:
+ return ret;
+}
+
+static ssize_t show_enabled(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ (dd->ipath_flags & IPATH_DISABLED) ? 0 : 1);
+}
+
+static ssize_t store_enabled(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ ssize_t ret;
+ u16 enable = 0;
+
+ ret = ipath_parse_ushort(buf, &enable);
+ if (ret < 0) {
+ ipath_dev_err(dd, "attempt to use non-numeric on enable\n");
+ goto bail;
+ }
+
+ if (enable) {
+ if (!(dd->ipath_flags & IPATH_DISABLED))
+ goto bail;
+
+ dev_info(dev, "Enabling unit %d\n", dd->ipath_unit);
+ /* same as post-reset */
+ ret = ipath_init_chip(dd, 1);
+ if (ret)
+ ipath_dev_err(dd, "Failed to enable unit %d\n",
+ dd->ipath_unit);
+ else {
+ dd->ipath_flags &= ~IPATH_DISABLED;
+ *dd->ipath_statusp &= ~IPATH_STATUS_ADMIN_DISABLED;
+ }
+ }
+ else if (!(dd->ipath_flags & IPATH_DISABLED)) {
+ dev_info(dev, "Disabling unit %d\n", dd->ipath_unit);
+ ipath_shutdown_device(dd);
+ dd->ipath_flags |= IPATH_DISABLED;
+ *dd->ipath_statusp |= IPATH_STATUS_ADMIN_DISABLED;
+ }
+
+bail:
+ return ret;
+}
+
+static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
+static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
+
+static struct attribute *driver_attributes[] = {
+ &driver_attr_num_units.attr,
+ &driver_attr_version.attr,
+ NULL
+};
+
+static struct attribute_group driver_attr_group = {
+ .attrs = driver_attributes
+};
+
+static DEVICE_ATTR(guid, S_IWUSR | S_IRUGO, show_guid, store_guid);
+static DEVICE_ATTR(lid, S_IWUSR | S_IRUGO, show_lid, store_lid);
+static DEVICE_ATTR(link_state, S_IWUSR, NULL, store_link_state);
+static DEVICE_ATTR(mlid, S_IWUSR | S_IRUGO, show_mlid, store_mlid);
+static DEVICE_ATTR(mtu, S_IWUSR | S_IRUGO, show_mtu, store_mtu);
+static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, show_enabled, store_enabled);
+static DEVICE_ATTR(nguid, S_IRUGO, show_nguid, NULL);
+static DEVICE_ATTR(reset, S_IWUSR, NULL, store_reset);
+static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
+static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL);
+static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
+static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
+
+static struct attribute *dev_attributes[] = {
+ &dev_attr_guid.attr,
+ &dev_attr_lid.attr,
+ &dev_attr_link_state.attr,
+ &dev_attr_mlid.attr,
+ &dev_attr_mtu.attr,
+ &dev_attr_nguid.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_status.attr,
+ &dev_attr_status_str.attr,
+ &dev_attr_boardversion.attr,
+ &dev_attr_unit.attr,
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group dev_attr_group = {
+ .attrs = dev_attributes
+};
+
+/**
+ * ipath_expose_reset - create a device reset file
+ * @dev: the device structure
+ *
+ * Only expose a file that lets us reset the device after someone
+ * enters diag mode. A device reset is quite likely to crash the
+ * machine entirely, so we don't normally want to make it
+ * available.
+ */
+int ipath_expose_reset(struct device *dev)
+{
+ return device_create_file(dev, &dev_attr_reset);
+}
+
+int ipath_driver_create_group(struct device_driver *drv)
+{
+ int ret;
+
+ ret = sysfs_create_group(&drv->kobj, &driver_attr_group);
+ if (ret)
+ goto bail;
+
+ ret = sysfs_create_group(&drv->kobj, &driver_stat_attr_group);
+ if (ret)
+ sysfs_remove_group(&drv->kobj, &driver_attr_group);
+
+bail:
+ return ret;
+}
+
+void ipath_driver_remove_group(struct device_driver *drv)
+{
+ sysfs_remove_group(&drv->kobj, &driver_stat_attr_group);
+ sysfs_remove_group(&drv->kobj, &driver_attr_group);
+}
+
+int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
+{
+ int ret;
+ char unit[5];
+
+ ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
+ if (ret)
+ goto bail;
+
+ ret = sysfs_create_group(&dev->kobj, &dev_counter_attr_group);
+ if (ret)
+ goto bail_attrs;
+
+ snprintf(unit, sizeof(unit), "%02d", dd->ipath_unit);
+ ret = sysfs_create_link(&dev->driver->kobj, &dev->kobj, unit);
+ if (ret == 0)
+ goto bail;
+
+ sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
+bail_attrs:
+ sysfs_remove_group(&dev->kobj, &dev_attr_group);
+bail:
+ return ret;
+}
+
+void ipath_device_remove_group(struct device *dev, struct ipath_devdata *dd)
+{
+ char unit[5];
+
+ snprintf(unit, sizeof(unit), "%02d", dd->ipath_unit);
+ sysfs_remove_link(&dev->driver->kobj, unit);
+
+ sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
+ sysfs_remove_group(&dev->kobj, &dev_attr_group);
+
+ device_remove_file(dev, &dev_attr_reset);
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
new file mode 100644
index 0000000000000..0d6dbc0a541e3
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -0,0 +1,645 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ipath_verbs.h"
+#include "ips_common.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_UC_##x
+
+static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
+ struct ib_wc *wc)
+{
+ if (++qp->s_last == qp->s_size)
+ qp->s_last = 0;
+ if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+ wc->wr_id = wqe->wr.wr_id;
+ wc->status = IB_WC_SUCCESS;
+ wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+ wc->vendor_err = 0;
+ wc->byte_len = wqe->length;
+ wc->qp_num = qp->ibqp.qp_num;
+ wc->src_qp = qp->remote_qpn;
+ wc->pkey_index = 0;
+ wc->slid = qp->remote_ah_attr.dlid;
+ wc->sl = qp->remote_ah_attr.sl;
+ wc->dlid_path_bits = 0;
+ wc->port_num = 0;
+ ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 0);
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+}
+
+/**
+ * ipath_do_uc_send - do a send on a UC queue
+ * @data: contains a pointer to the QP to send on
+ *
+ * Process entries in the send work queue until the queue is exhausted.
+ * Only allow one CPU to send a packet per QP (tasklet).
+ * Otherwise, after we drop the QP lock, two threads could send
+ * packets out of order.
+ * This is similar to ipath_do_rc_send() below except we don't have
+ * timeouts or resends.
+ */
+void ipath_do_uc_send(unsigned long data)
+{
+ struct ipath_qp *qp = (struct ipath_qp *)data;
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ipath_swqe *wqe;
+ unsigned long flags;
+ u16 lrh0;
+ u32 hwords;
+ u32 nwords;
+ u32 extra_bytes;
+ u32 bth0;
+ u32 bth2;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ u32 len;
+ struct ipath_other_headers *ohdr;
+ struct ib_wc wc;
+
+ if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
+ goto bail;
+
+ if (unlikely(qp->remote_ah_attr.dlid ==
+ ipath_layer_get_lid(dev->dd))) {
+ /* Pass in an uninitialized ib_wc to save stack space. */
+ ipath_ruc_loopback(qp, &wc);
+ clear_bit(IPATH_S_BUSY, &qp->s_flags);
+ goto bail;
+ }
+
+ ohdr = &qp->s_hdr.u.oth;
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ ohdr = &qp->s_hdr.u.l.oth;
+
+again:
+ /* Check for a constructed packet to be sent. */
+ if (qp->s_hdrwords != 0) {
+ /*
+ * If no PIO bufs are available, return.
+ * An interrupt will call ipath_ib_piobufavail()
+ * when one is available.
+ */
+ if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
+ (u32 *) &qp->s_hdr,
+ qp->s_cur_size,
+ qp->s_cur_sge)) {
+ ipath_no_bufs_available(qp, dev);
+ goto bail;
+ }
+ dev->n_unicast_xmit++;
+ /* Record that we sent the packet and s_hdr is empty. */
+ qp->s_hdrwords = 0;
+ }
+
+ lrh0 = IPS_LRH_BTH;
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+
+ /*
+ * The lock is needed to synchronize between
+ * setting qp->s_ack_state and post_send().
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
+ goto done;
+
+ bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+
+ /* Send a request. */
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ switch (qp->s_state) {
+ default:
+ /*
+ * Signal the completion of the last send (if there is
+ * one).
+ */
+ if (qp->s_last != qp->s_tail)
+ complete_last_send(qp, wqe, &wc);
+
+ /* Check if send work queue is empty. */
+ if (qp->s_tail == qp->s_head)
+ goto done;
+ /*
+ * Start a new request.
+ */
+ qp->s_psn = wqe->psn = qp->s_next_psn;
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_len = len = wqe->length;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ if (len > pmtu) {
+ qp->s_state = OP(SEND_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_ONLY);
+ else {
+ qp->s_state =
+ OP(SEND_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= 1 << 23;
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ hwords += sizeof(struct ib_reth) / 4;
+ if (len > pmtu) {
+ qp->s_state = OP(RDMA_WRITE_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_ONLY);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the RETH */
+ ohdr->u.rc.imm_data = wqe->wr.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= 1 << 23;
+ }
+ break;
+
+ default:
+ goto done;
+ }
+ if (++qp->s_tail >= qp->s_size)
+ qp->s_tail = 0;
+ break;
+
+ case OP(SEND_FIRST):
+ qp->s_state = OP(SEND_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_LAST);
+ else {
+ qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= 1 << 23;
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ qp->s_state = OP(RDMA_WRITE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_LAST);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= 1 << 23;
+ }
+ break;
+ }
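+ /* Mask the incrementing sequence number to the 24-bit PSN field of BTH[2]. */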
+ bth2 = qp->s_next_psn++ & IPS_PSN_MASK;
+ qp->s_len -= len;
+ bth0 |= qp->s_state << 24;
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Construct the header. */
+ extra_bytes = (4 - len) & 3;
+ nwords = (len + extra_bytes) >> 2;
+ if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ /* Header size in 32-bit words. */
+ hwords += 10;
+ lrh0 = IPS_LRH_GRH;
+ qp->s_hdr.u.l.grh.version_tclass_flow =
+ cpu_to_be32((6 << 28) |
+ (qp->remote_ah_attr.grh.traffic_class
+ << 20) |
+ qp->remote_ah_attr.grh.flow_label);
+ qp->s_hdr.u.l.grh.paylen =
+ cpu_to_be16(((hwords - 12) + nwords +
+ SIZE_OF_CRC) << 2);
+ /* next_hdr is defined by C8-7 in ch. 8.4.1 */
+ qp->s_hdr.u.l.grh.next_hdr = 0x1B;
+ qp->s_hdr.u.l.grh.hop_limit =
+ qp->remote_ah_attr.grh.hop_limit;
+ /* The SGID is 32-bit aligned. */
+ qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
+ dev->gid_prefix;
+ qp->s_hdr.u.l.grh.sgid.global.interface_id =
+ ipath_layer_get_guid(dev->dd);
+ qp->s_hdr.u.l.grh.dgid = qp->remote_ah_attr.grh.dgid;
+ }
+ qp->s_hdrwords = hwords;
+ qp->s_cur_sge = &qp->s_sge;
+ qp->s_cur_size = len;
+ lrh0 |= qp->remote_ah_attr.sl << 4;
+ qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+ /* DEST LID */
+ qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
+ qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
+ bth0 |= extra_bytes << 20;
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(bth2);
+
+ /* Check for more work to do. */
+ goto again;
+
+done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ clear_bit(IPATH_S_BUSY, &qp->s_flags);
+
+bail:
+ return;
+}
+
+/**
+ * ipath_uc_rcv - handle an incoming UC packet
+ * @dev: the device the packet came in on
+ * @hdr: the header of the packet
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the length of the packet
+ * @qp: the QP for this packet.
+ *
+ * This is called from ipath_qp_rcv() to process an incoming UC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
+{
+ struct ipath_other_headers *ohdr;
+ int opcode;
+ u32 hdrsize;
+ u32 psn;
+ u32 pad;
+ unsigned long flags;
+ struct ib_wc wc;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ struct ib_reth *reth;
+ int header_in_data;
+
+ /* Check for GRH */
+ if (!has_grh) {
+ ohdr = &hdr->u.oth;
+ hdrsize = 8 + 12; /* LRH + BTH */
+ psn = be32_to_cpu(ohdr->bth[2]);
+ header_in_data = 0;
+ } else {
+ ohdr = &hdr->u.l.oth;
+ hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
+ /*
+ * The header with GRH is 60 bytes and the
+ * core driver sets the eager header buffer
+ * size to 56 bytes so the last 4 bytes of
+ * the BTH header (PSN) is in the data buffer.
+ */
+ header_in_data =
+ ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
+ if (header_in_data) {
+ psn = be32_to_cpu(((__be32 *) data)[0]);
+ data += sizeof(__be32);
+ } else
+ psn = be32_to_cpu(ohdr->bth[2]);
+ }
+ /*
+ * The opcode is in the low byte when it's in network order
+ * (top byte when in host order).
+ */
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+
+ wc.imm_data = 0;
+ wc.wc_flags = 0;
+
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+
+ /* Compare the PSN versus the expected PSN. */
+ if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
+ /*
+ * Handle a sequence error.
+ * Silently drop any current message.
+ */
+ qp->r_psn = psn;
+ inv:
+ qp->r_state = OP(SEND_LAST);
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+ goto send_first;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ goto rdma_first;
+
+ default:
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ }
+
+ /* Check for opcode sequence errors. */
+ switch (qp->r_state) {
+ case OP(SEND_FIRST):
+ case OP(SEND_MIDDLE):
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+ break;
+ goto inv;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_MIDDLE):
+ if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ break;
+ goto inv;
+
+ default:
+ if (opcode == OP(SEND_FIRST) ||
+ opcode == OP(SEND_ONLY) ||
+ opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_FIRST) ||
+ opcode == OP(RDMA_WRITE_ONLY) ||
+ opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ break;
+ goto inv;
+ }
+
+ /* OK, process the packet. */
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+ send_first:
+ if (qp->r_reuse_sge) {
+ qp->r_reuse_sge = 0;
+ qp->r_sge = qp->s_rdma_sge;
+ } else if (!ipath_get_rwqe(qp, 0)) {
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ /* Save the WQE so we can reuse it in case of an error. */
+ qp->s_rdma_sge = qp->r_sge;
+ qp->r_rcv_len = 0;
+ if (opcode == OP(SEND_ONLY))
+ goto send_last;
+ else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
+ goto send_last_imm;
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4))) {
+ qp->r_reuse_sge = 1;
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len)) {
+ qp->r_reuse_sge = 1;
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ ipath_copy_sge(&qp->r_sge, data, pmtu);
+ break;
+
+ case OP(SEND_LAST_WITH_IMMEDIATE):
+ send_last_imm:
+ if (header_in_data) {
+ wc.imm_data = *(__be32 *) data;
+ data += sizeof(__be32);
+ } else {
+ /* Immediate data comes after BTH */
+ wc.imm_data = ohdr->u.imm_data;
+ }
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ /* FALLTHROUGH */
+ case OP(SEND_LAST):
+ send_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4))) {
+ qp->r_reuse_sge = 1;
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ wc.byte_len = tlen + qp->r_rcv_len;
+ if (unlikely(wc.byte_len > qp->r_len)) {
+ qp->r_reuse_sge = 1;
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ /* XXX Need to free SGEs */
+ last_imm:
+ ipath_copy_sge(&qp->r_sge, data, tlen);
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.vendor_err = 0;
+ wc.qp_num = qp->ibqp.qp_num;
+ wc.src_qp = qp->remote_qpn;
+ wc.pkey_index = 0;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ /* Signal completion event if the solicited bit is set. */
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ __constant_cpu_to_be32(1 << 23)) != 0);
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
+ rdma_first:
+ /* RETH comes after BTH */
+ if (!header_in_data)
+ reth = &ohdr->u.rc.reth;
+ else {
+ reth = (struct ib_reth *)data;
+ data += sizeof(*reth);
+ }
+ hdrsize += sizeof(*reth);
+ qp->r_len = be32_to_cpu(reth->length);
+ qp->r_rcv_len = 0;
+ if (qp->r_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+
+ /* Check rkey */
+ if (unlikely(!ipath_rkey_ok(
+ dev, &qp->r_sge, qp->r_len,
+ vaddr, rkey,
+ IB_ACCESS_REMOTE_WRITE))) {
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ } else {
+ qp->r_sge.sg_list = NULL;
+ qp->r_sge.sge.mr = NULL;
+ qp->r_sge.sge.vaddr = NULL;
+ qp->r_sge.sge.length = 0;
+ qp->r_sge.sge.sge_length = 0;
+ }
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE))) {
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ if (opcode == OP(RDMA_WRITE_ONLY))
+ goto rdma_last;
+ else if (opcode ==
+ OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ goto rdma_last_imm;
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4))) {
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len)) {
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ ipath_copy_sge(&qp->r_sge, data, pmtu);
+ break;
+
+ case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+ rdma_last_imm:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4))) {
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ if (qp->r_reuse_sge) {
+ qp->r_reuse_sge = 0;
+ } else if (!ipath_get_rwqe(qp, 1)) {
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ if (header_in_data) {
+ wc.imm_data = *(__be32 *) data;
+ data += sizeof(__be32);
+ } else {
+ /* Immediate data comes after BTH */
+ wc.imm_data = ohdr->u.imm_data;
+ }
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.byte_len = 0;
+ goto last_imm;
+
+ case OP(RDMA_WRITE_LAST):
+ rdma_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4))) {
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ ipath_copy_sge(&qp->r_sge, data, tlen);
+ break;
+
+ default:
+ /* Drop packet for unknown opcodes. */
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ dev->n_pkt_drops++;
+ goto bail;
+ }
+ qp->r_psn++;
+ qp->r_state = opcode;
+done:
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+
+bail:
+ return;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
new file mode 100644
index 0000000000000..5ff3de6128b2f
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -0,0 +1,621 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_smi.h>
+
+#include "ipath_verbs.h"
+#include "ips_common.h"
+
+/**
+ * ipath_ud_loopback - handle send on loopback QPs
+ * @sqp: the QP
+ * @ss: the SGE state
+ * @length: the length of the data to send
+ * @wr: the work request
+ * @wc: the work completion entry
+ *
+ * This is called from ipath_post_ud_send() to forward a WQE addressed
+ * to the same HCA.
+ */
+void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
+ u32 length, struct ib_send_wr *wr, struct ib_wc *wc)
+{
+ struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
+ struct ipath_qp *qp;
+ struct ib_ah_attr *ah_attr;
+ unsigned long flags;
+ struct ipath_rq *rq;
+ struct ipath_srq *srq;
+ struct ipath_sge_state rsge;
+ struct ipath_sge *sge;
+ struct ipath_rwqe *wqe;
+
+ qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn);
+ if (!qp)
+ return;
+
+ /*
+ * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ if (unlikely(qp->ibqp.qp_num &&
+ ((int) wr->wr.ud.remote_qkey < 0
+ ? qp->qkey : wr->wr.ud.remote_qkey) != qp->qkey)) {
+ /* XXX OK to lose a count once in a while. */
+ dev->qkey_violations++;
+ dev->n_pkt_drops++;
+ goto done;
+ }
+
+ /*
+ * A GRH is expected to precede the data even if not
+ * present on the wire.
+ */
+ wc->byte_len = length + sizeof(struct ib_grh);
+
+ if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->imm_data = wr->imm_data;
+ } else {
+ wc->wc_flags = 0;
+ wc->imm_data = 0;
+ }
+
+ /*
+ * Get the next work request entry to find where to put the data.
+ * Note that it is safe to drop the lock after changing rq->tail
+ * since ipath_post_receive() won't fill the empty slot.
+ */
+ if (qp->ibqp.srq) {
+ srq = to_isrq(qp->ibqp.srq);
+ rq = &srq->rq;
+ } else {
+ srq = NULL;
+ rq = &qp->r_rq;
+ }
+ spin_lock_irqsave(&rq->lock, flags);
+ if (rq->tail == rq->head) {
+ spin_unlock_irqrestore(&rq->lock, flags);
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ /* Silently drop packets which are too big. */
+ wqe = get_rwqe_ptr(rq, rq->tail);
+ if (wc->byte_len > wqe->length) {
+ spin_unlock_irqrestore(&rq->lock, flags);
+ dev->n_pkt_drops++;
+ goto done;
+ }
+ wc->wr_id = wqe->wr_id;
+ rsge.sge = wqe->sg_list[0];
+ rsge.sg_list = wqe->sg_list + 1;
+ rsge.num_sge = wqe->num_sge;
+ if (++rq->tail >= rq->size)
+ rq->tail = 0;
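+ /*
+ * If this receive came from an SRQ that has just dropped below its
+ * limit, raise the SRQ_LIMIT_REACHED event once and disarm the limit.
+ */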
+ if (srq && srq->ibsrq.event_handler) {
+ u32 n;
+
+ if (rq->head < rq->tail)
+ n = rq->size + rq->head - rq->tail;
+ else
+ n = rq->head - rq->tail;
+ if (n < srq->limit) {
+ struct ib_event ev;
+
+ srq->limit = 0;
+ spin_unlock_irqrestore(&rq->lock, flags);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ srq->ibsrq.event_handler(&ev,
+ srq->ibsrq.srq_context);
+ } else
+ spin_unlock_irqrestore(&rq->lock, flags);
+ } else
+ spin_unlock_irqrestore(&rq->lock, flags);
+ ah_attr = &to_iah(wr->wr.ud.ah)->attr;
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
+ wc->wc_flags |= IB_WC_GRH;
+ } else
+ ipath_skip_sge(&rsge, sizeof(struct ib_grh));
+ sge = &ss->sge;
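+ /*
+ * Copy the payload from the sender's SGE list into the buffer just
+ * dequeued, advancing through memory region segments as each SGE drains.
+ */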
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ BUG_ON(len == 0);
+ ipath_copy_sge(&rsge, sge->vaddr, len);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr != NULL) {
+ if (++sge->n >= IPATH_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ length -= len;
+ }
+ wc->status = IB_WC_SUCCESS;
+ wc->opcode = IB_WC_RECV;
+ wc->vendor_err = 0;
+ wc->qp_num = qp->ibqp.qp_num;
+ wc->src_qp = sqp->ibqp.qp_num;
+ /* XXX do we know which pkey matched? Only needed for GSI. */
+ wc->pkey_index = 0;
+ wc->slid = ipath_layer_get_lid(dev->dd) |
+ (ah_attr->src_path_bits &
+ ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1));
+ wc->sl = ah_attr->sl;
+ wc->dlid_path_bits =
+ ah_attr->dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
+ /* Signal completion event if the solicited bit is set. */
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
+ wr->send_flags & IB_SEND_SOLICITED);
+
+done:
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
+
+/**
+ * ipath_post_ud_send - post a UD send on QP
+ * @qp: the QP
+ * @wr: the work request
+ *
+ * Note that we actually send the data as it is posted instead of putting
+ * the request into a ring buffer. If we wanted to use a ring buffer,
+ * we would need to save a reference to the destination address in the SWQE.
+ */
+int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
+{
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ipath_other_headers *ohdr;
+ struct ib_ah_attr *ah_attr;
+ struct ipath_sge_state ss;
+ struct ipath_sge *sg_list;
+ struct ib_wc wc;
+ u32 hwords;
+ u32 nwords;
+ u32 len;
+ u32 extra_bytes;
+ u32 bth0;
+ u16 lrh0;
+ u16 lid;
+ int i;
+ int ret;
+
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
+ ret = 0;
+ goto bail;
+ }
+
+ /* IB spec says that num_sge == 0 is OK. */
+ if (wr->num_sge > qp->s_max_sge) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (wr->num_sge > 1) {
+ sg_list = kmalloc((qp->s_max_sge - 1) * sizeof(*sg_list),
+ GFP_ATOMIC);
+ if (!sg_list) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ } else
+ sg_list = NULL;
+
+ /* Check the buffer to send. */
+ ss.sg_list = sg_list;
+ ss.sge.mr = NULL;
+ ss.sge.vaddr = NULL;
+ ss.sge.length = 0;
+ ss.sge.sge_length = 0;
+ ss.num_sge = 0;
+ len = 0;
+ for (i = 0; i < wr->num_sge; i++) {
+ /* Check LKEY */
+ if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (wr->sg_list[i].length == 0)
+ continue;
+ if (!ipath_lkey_ok(&dev->lk_table, ss.num_sge ?
+ sg_list + ss.num_sge - 1 : &ss.sge,
+ &wr->sg_list[i], 0)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ len += wr->sg_list[i].length;
+ ss.num_sge++;
+ }
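+ /*
+ * Round the payload up to a multiple of 4 bytes and count it in
+ * 32-bit words, e.g. len = 9 gives extra_bytes = 3 and nwords = 3.
+ */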
+ extra_bytes = (4 - len) & 3;
+ nwords = (len + extra_bytes) >> 2;
+
+ /* Construct the header. */
+ ah_attr = &to_iah(wr->wr.ud.ah)->attr;
+ if (ah_attr->dlid == 0) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE) {
+ if (ah_attr->dlid != IPS_PERMISSIVE_LID)
+ dev->n_multicast_xmit++;
+ else
+ dev->n_unicast_xmit++;
+ } else {
+ dev->n_unicast_xmit++;
+ lid = ah_attr->dlid &
+ ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
+ if (unlikely(lid == ipath_layer_get_lid(dev->dd))) {
+ /*
+ * Pass in an uninitialized ib_wc to save stack
+ * space.
+ */
+ ipath_ud_loopback(qp, &ss, len, wr, &wc);
+ goto done;
+ }
+ }
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ /* Header size in 32-bit words. */
+ hwords = 17;
+ lrh0 = IPS_LRH_GRH;
+ ohdr = &qp->s_hdr.u.l.oth;
+ qp->s_hdr.u.l.grh.version_tclass_flow =
+ cpu_to_be32((6 << 28) |
+ (ah_attr->grh.traffic_class << 20) |
+ ah_attr->grh.flow_label);
+ qp->s_hdr.u.l.grh.paylen =
+ cpu_to_be16(((wr->opcode ==
+ IB_WR_SEND_WITH_IMM ? 6 : 5) +
+ nwords + SIZE_OF_CRC) << 2);
+ /* next_hdr is defined by C8-7 in ch. 8.4.1 */
+ qp->s_hdr.u.l.grh.next_hdr = 0x1B;
+ qp->s_hdr.u.l.grh.hop_limit = ah_attr->grh.hop_limit;
+ /* The SGID is 32-bit aligned. */
+ qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
+ dev->gid_prefix;
+ qp->s_hdr.u.l.grh.sgid.global.interface_id =
+ ipath_layer_get_guid(dev->dd);
+ qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
+ /*
+ * Don't worry about sending to locally attached multicast
+ * QPs; the spec does not specify what happens.
+ */
+ } else {
+ /* Header size in 32-bit words. */
+ hwords = 7;
+ lrh0 = IPS_LRH_BTH;
+ ohdr = &qp->s_hdr.u.oth;
+ }
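+ /*
+ * Word counts above: LRH (2 words) + BTH (3) + DETH (2) = 7, plus
+ * a 10 word GRH when present for a total of 17.
+ */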
+ if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+ ohdr->u.ud.imm_data = wr->imm_data;
+ wc.imm_data = wr->imm_data;
+ hwords += 1;
+ bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
+ } else if (wr->opcode == IB_WR_SEND) {
+ wc.imm_data = 0;
+ bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
+ } else {
+ ret = -EINVAL;
+ goto bail;
+ }
+ lrh0 |= ah_attr->sl << 4;
+ if (qp->ibqp.qp_type == IB_QPT_SMI)
+ lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
+ qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+ qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
+ qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
+ lid = ipath_layer_get_lid(dev->dd);
+ if (lid) {
+ lid |= ah_attr->src_path_bits &
+ ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
+ qp->s_hdr.lrh[3] = cpu_to_be16(lid);
+ } else
+ qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ bth0 |= 1 << 23;
+ bth0 |= extra_bytes << 20;
+ bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPS_DEFAULT_P_KEY :
+ ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ /*
+ * Use the multicast QP if the destination LID is a multicast LID.
+ */
+ ohdr->bth[1] = ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
+ ah_attr->dlid != IPS_PERMISSIVE_LID ?
+ __constant_cpu_to_be32(IPS_MULTICAST_QPN) :
+ cpu_to_be32(wr->wr.ud.remote_qpn);
+ /* XXX Could lose a PSN count but not worth locking */
+ ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPS_PSN_MASK);
+ /*
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ ohdr->u.ud.deth[0] = cpu_to_be32((int)wr->wr.ud.remote_qkey < 0 ?
+ qp->qkey : wr->wr.ud.remote_qkey);
+ ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
+ if (ipath_verbs_send(dev->dd, hwords, (u32 *) &qp->s_hdr,
+ len, &ss))
+ dev->n_no_piobuf++;
+
+done:
+ /* Queue the completion status entry. */
+ if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
+ (wr->send_flags & IB_SEND_SIGNALED)) {
+ wc.wr_id = wr->wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.vendor_err = 0;
+ wc.opcode = IB_WC_SEND;
+ wc.byte_len = len;
+ wc.qp_num = qp->ibqp.qp_num;
+ wc.src_qp = 0;
+ wc.wc_flags = 0;
+ /* XXX initialize other fields? */
+ ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ }
+ kfree(sg_list);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_ud_rcv - receive an incoming UD packet
+ * @dev: the device the packet came in on
+ * @hdr: the packet header
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP the packet came on
+ *
+ * This is called from ipath_qp_rcv() to process an incoming UD packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
+{
+ struct ipath_other_headers *ohdr;
+ int opcode;
+ u32 hdrsize;
+ u32 pad;
+ unsigned long flags;
+ struct ib_wc wc;
+ u32 qkey;
+ u32 src_qp;
+ struct ipath_rq *rq;
+ struct ipath_srq *srq;
+ struct ipath_rwqe *wqe;
+ u16 dlid;
+ int header_in_data;
+
+ /* Check for GRH */
+ if (!has_grh) {
+ ohdr = &hdr->u.oth;
+ hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
+ qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
+ src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
+ header_in_data = 0;
+ } else {
+ ohdr = &hdr->u.l.oth;
+ hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
+ /*
+ * The header with GRH is 68 bytes and the core driver sets
+ * the eager header buffer size to 56 bytes so the last 12
+ * bytes of the IB header is in the data buffer.
+ */
+ header_in_data =
+ ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
+ if (header_in_data) {
+ qkey = be32_to_cpu(((__be32 *) data)[1]);
+ src_qp = be32_to_cpu(((__be32 *) data)[2]);
+ data += 12;
+ } else {
+ qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
+ src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
+ }
+ }
+ src_qp &= IPS_QPN_MASK;
+
+ /*
+ * Check that the permissive LID is only used on QP0
+ * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
+ */
+ if (qp->ibqp.qp_num) {
+ if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
+ hdr->lrh[3] == IB_LID_PERMISSIVE)) {
+ dev->n_pkt_drops++;
+ goto bail;
+ }
+ if (unlikely(qkey != qp->qkey)) {
+ /* XXX OK to lose a count once in a while. */
+ dev->qkey_violations++;
+ dev->n_pkt_drops++;
+ goto bail;
+ }
+ } else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
+ hdr->lrh[3] == IB_LID_PERMISSIVE) {
+ struct ib_smp *smp = (struct ib_smp *) data;
+
+ if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ dev->n_pkt_drops++;
+ goto bail;
+ }
+ }
+
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ if (unlikely(tlen < (hdrsize + pad + 4))) {
+ /* Drop incomplete packets. */
+ dev->n_pkt_drops++;
+ goto bail;
+ }
+ tlen -= hdrsize + pad + 4;
+
+ /* Drop invalid MAD packets (see 13.5.3.1). */
+ if (unlikely((qp->ibqp.qp_num == 0 &&
+ (tlen != 256 ||
+ (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
+ (qp->ibqp.qp_num == 1 &&
+ (tlen != 256 ||
+ (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
+ dev->n_pkt_drops++;
+ goto bail;
+ }
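+ /*
+ * In other words, QP0 only accepts 256 byte MADs arriving on VL15
+ * (the management VL) and QP1 only accepts 256 byte MADs on data
+ * VLs.
+ */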
+
+ /*
+ * A GRH is expected to precede the data even if not
+ * present on the wire.
+ */
+ wc.byte_len = tlen + sizeof(struct ib_grh);
+
+ /*
+ * The opcode is in the low byte when it's in network order
+ * (top byte when in host order).
+ */
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ if (qp->ibqp.qp_num > 1 &&
+ opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
+ if (header_in_data) {
+ wc.imm_data = *(__be32 *) data;
+ data += sizeof(__be32);
+ } else
+ wc.imm_data = ohdr->u.ud.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ hdrsize += sizeof(u32);
+ } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
+ wc.imm_data = 0;
+ wc.wc_flags = 0;
+ } else {
+ dev->n_pkt_drops++;
+ goto bail;
+ }
+
+ /*
+ * Get the next work request entry to find where to put the data.
+ * Note that it is safe to drop the lock after changing rq->tail
+ * since ipath_post_receive() won't fill the empty slot.
+ */
+ if (qp->ibqp.srq) {
+ srq = to_isrq(qp->ibqp.srq);
+ rq = &srq->rq;
+ } else {
+ srq = NULL;
+ rq = &qp->r_rq;
+ }
+ spin_lock_irqsave(&rq->lock, flags);
+ if (rq->tail == rq->head) {
+ spin_unlock_irqrestore(&rq->lock, flags);
+ dev->n_pkt_drops++;
+ goto bail;
+ }
+ /* Silently drop packets which are too big. */
+ wqe = get_rwqe_ptr(rq, rq->tail);
+ if (wc.byte_len > wqe->length) {
+ spin_unlock_irqrestore(&rq->lock, flags);
+ dev->n_pkt_drops++;
+ goto bail;
+ }
+ wc.wr_id = wqe->wr_id;
+ qp->r_sge.sge = wqe->sg_list[0];
+ qp->r_sge.sg_list = wqe->sg_list + 1;
+ qp->r_sge.num_sge = wqe->num_sge;
+ if (++rq->tail >= rq->size)
+ rq->tail = 0;
+ if (srq && srq->ibsrq.event_handler) {
+ u32 n;
+
+ if (rq->head < rq->tail)
+ n = rq->size + rq->head - rq->tail;
+ else
+ n = rq->head - rq->tail;
+ if (n < srq->limit) {
+ struct ib_event ev;
+
+ srq->limit = 0;
+ spin_unlock_irqrestore(&rq->lock, flags);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ srq->ibsrq.event_handler(&ev,
+ srq->ibsrq.srq_context);
+ } else
+ spin_unlock_irqrestore(&rq->lock, flags);
+ } else
+ spin_unlock_irqrestore(&rq->lock, flags);
+ if (has_grh) {
+ ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
+ sizeof(struct ib_grh));
+ wc.wc_flags |= IB_WC_GRH;
+ } else
+ ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
+ ipath_copy_sge(&qp->r_sge, data,
+ wc.byte_len - sizeof(struct ib_grh));
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.vendor_err = 0;
+ wc.qp_num = qp->ibqp.qp_num;
+ wc.src_qp = src_qp;
+ /* XXX do we know which pkey matched? Only needed for GSI. */
+ wc.pkey_index = 0;
+ wc.slid = be16_to_cpu(hdr->lrh[3]);
+ wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
+ dlid = be16_to_cpu(hdr->lrh[1]);
+ /*
+ * Save the LMC lower bits if the destination LID is a unicast LID.
+ */
+ wc.dlid_path_bits = dlid >= IPS_MULTICAST_LID_BASE ? 0 :
+ dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
+ /* Signal completion event if the solicited bit is set. */
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ __constant_cpu_to_be32(1 << 23)) != 0);
+
+bail:;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
new file mode 100644
index 0000000000000..2bb08afc86d01
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mm.h>
+#include <linux/device.h>
+
+#include "ipath_kernel.h"
+
+static void __ipath_release_user_pages(struct page **p, size_t num_pages,
+ int dirty)
+{
+ size_t i;
+
+ for (i = 0; i < num_pages; i++) {
+ ipath_cdbg(MM, "%lu/%lu put_page %p\n", (unsigned long) i,
+ (unsigned long) num_pages, p[i]);
+ if (dirty)
+ set_page_dirty_lock(p[i]);
+ put_page(p[i]);
+ }
+}
+
+/* call with current->mm->mmap_sem held */
+static int __get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p, struct vm_area_struct **vma)
+{
+ unsigned long lock_limit;
+ size_t got;
+ int ret;
+
+#if 0
+ /*
+ * XXX - causes MPI programs to fail, haven't had time to check
+ * yet
+ */
+ if (!capable(CAP_IPC_LOCK)) {
+ ret = -EPERM;
+ goto bail;
+ }
+#endif
+
+ lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >>
+ PAGE_SHIFT;
+
+ if (num_pages > lock_limit) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ ipath_cdbg(VERBOSE, "pin %lx pages from vaddr %lx\n",
+ (unsigned long) num_pages, start_page);
+
+ for (got = 0; got < num_pages; got += ret) {
+ ret = get_user_pages(current, current->mm,
+ start_page + got * PAGE_SIZE,
+ num_pages - got, 1, 1,
+ p + got, vma);
+ if (ret < 0)
+ goto bail_release;
+ }
+
+ current->mm->locked_vm += num_pages;
+
+ ret = 0;
+ goto bail;
+
+bail_release:
+ __ipath_release_user_pages(p, got, 0);
+bail:
+ return ret;
+}
+
+/**
+ * ipath_get_user_pages - lock user pages into memory
+ * @start_page: the start page
+ * @num_pages: the number of pages
+ * @p: the output page structures
+ *
+ * This function takes a given start page (page aligned user virtual
+ * address) and pins it and the following specified number of pages. For
+ * now, num_pages is always 1, but that will probably change at some point
+ * (because caller is doing expected sends on a single virtually contiguous
+ * buffer, so we can do all pages at once).
+ */
+int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p)
+{
+ int ret;
+
+ down_write(&current->mm->mmap_sem);
+
+ ret = __get_user_pages(start_page, num_pages, p, NULL);
+
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
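+
+/*
+ * Minimal usage sketch (illustrative only; "uaddr" and "pg" are not
+ * names used by this driver):
+ *
+ *	struct page *pg;
+ *
+ *	if (!ipath_get_user_pages(uaddr & PAGE_MASK, 1, &pg)) {
+ *		... hand page_to_phys(pg) to the hardware ...
+ *		ipath_release_user_pages(&pg, 1);
+ *	}
+ */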
+
+/**
+ * ipath_get_user_pages_nocopy - lock a single page for I/O and mark shared
+ * @page: the page to lock
+ * @p: the output page structure
+ *
+ * This is similar to ipath_get_user_pages, but it's always one page, and we
+ * mark the page as locked for I/O, and shared. This is used for the user
+ * process page that contains the destination address for the rcvhdrq tail
+ * update, so we need to have the vma. If we don't do this, the page can be
+ * taken away from us on fork, even if the child never touches it, and then
+ * the user process never sees the tail register updates.
+ */
+int ipath_get_user_pages_nocopy(unsigned long page, struct page **p)
+{
+ struct vm_area_struct *vma;
+ int ret;
+
+ down_write(&current->mm->mmap_sem);
+
+ ret = __get_user_pages(page, 1, p, &vma);
+
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+void ipath_release_user_pages(struct page **p, size_t num_pages)
+{
+ down_write(&current->mm->mmap_sem);
+
+ __ipath_release_user_pages(p, num_pages, 1);
+
+ current->mm->locked_vm -= num_pages;
+
+ up_write(&current->mm->mmap_sem);
+}
+
+struct ipath_user_pages_work {
+ struct work_struct work;
+ struct mm_struct *mm;
+ unsigned long num_pages;
+};
+
+static void user_pages_account(void *ptr)
+{
+ struct ipath_user_pages_work *work = ptr;
+
+ down_write(&work->mm->mmap_sem);
+ work->mm->locked_vm -= work->num_pages;
+ up_write(&work->mm->mmap_sem);
+ mmput(work->mm);
+ kfree(work);
+}
+
+void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
+{
+ struct ipath_user_pages_work *work;
+ struct mm_struct *mm;
+
+ __ipath_release_user_pages(p, num_pages, 1);
+
+ mm = get_task_mm(current);
+ if (!mm)
+ goto bail;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ goto bail_mm;
+
+ INIT_WORK(&work->work, user_pages_account, work);
+ work->mm = mm;
+ work->num_pages = num_pages;
+
+ /* user_pages_account() will do the mmput() and free work. */
+ schedule_work(&work->work);
+ goto bail;
+
+bail_mm:
+ mmput(mm);
+bail:
+ return;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
new file mode 100644
index 0000000000000..9f27fd35cdbb7
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -0,0 +1,1222 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/utsname.h>
+
+#include "ipath_kernel.h"
+#include "ipath_verbs.h"
+#include "ips_common.h"
+
+/* Not static, because we don't want the compiler removing it */
+const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;
+
+unsigned int ib_ipath_qp_table_size = 251;
+module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
+MODULE_PARM_DESC(qp_table_size, "QP table size");
+
+unsigned int ib_ipath_lkey_table_size = 12;
+module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
+ S_IRUGO);
+MODULE_PARM_DESC(lkey_table_size,
+ "LKEY table size in bits (2^n, 1 <= n <= 23)");
+
+unsigned int ib_ipath_debug; /* debug mask */
+module_param_named(debug, ib_ipath_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(debug, "Verbs debug mask");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("PathScale <support@pathscale.com>");
+MODULE_DESCRIPTION("Pathscale InfiniPath driver");
+
+const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = 0,
+ [IB_QPS_INIT] = IPATH_POST_RECV_OK,
+ [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
+ [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
+ IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
+ [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
+ IPATH_POST_SEND_OK,
+ [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
+ [IB_QPS_ERR] = 0,
+};
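+
+/*
+ * For example, IB_QPS_RTS includes IPATH_PROCESS_SEND_OK, so a UD
+ * post_send only builds and sends a packet while the QP is in RTS
+ * (see ipath_post_ud_send()); in SQD the post returns success but no
+ * packet is built, since UD sends are not queued for later.
+ */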
+
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
+ [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+ [IB_WR_SEND] = IB_WC_SEND,
+ [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+ [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+};
+
+/*
+ * System image GUID.
+ */
+__be64 sys_image_guid;
+
+/**
+ * ipath_copy_sge - copy data to SGE memory
+ * @ss: the SGE state
+ * @data: the data to copy
+ * @length: the length of the data
+ */
+void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
+{
+ struct ipath_sge *sge = &ss->sge;
+
+ while (length) {
+ u32 len = sge->length;
+
+ BUG_ON(len == 0);
+ if (len > length)
+ len = length;
+ memcpy(sge->vaddr, data, len);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr != NULL) {
+ if (++sge->n >= IPATH_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ data += len;
+ length -= len;
+ }
+}
+
+/**
+ * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
+ * @ss: the SGE state
+ * @length: the number of bytes to skip
+ */
+void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
+{
+ struct ipath_sge *sge = &ss->sge;
+
+ while (length > sge->sge_length) {
+ length -= sge->sge_length;
+ ss->sge = *ss->sg_list++;
+ }
+ while (length) {
+ u32 len = sge->length;
+
+ BUG_ON(len == 0);
+ if (len > length)
+ len = length;
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr != NULL) {
+ if (++sge->n >= IPATH_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ length -= len;
+ }
+}
+
+/**
+ * ipath_post_send - post a send on a QP
+ * @ibqp: the QP to post the send on
+ * @wr: the list of work requests to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ */
+static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct ipath_qp *qp = to_iqp(ibqp);
+ int err = 0;
+
+ /* Check that state is OK to post send. */
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)) {
+ *bad_wr = wr;
+ err = -EINVAL;
+ goto bail;
+ }
+
+ for (; wr; wr = wr->next) {
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_UC:
+ case IB_QPT_RC:
+ err = ipath_post_rc_send(qp, wr);
+ break;
+
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ err = ipath_post_ud_send(qp, wr);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+ if (err) {
+ *bad_wr = wr;
+ break;
+ }
+ }
+
+bail:
+ return err;
+}
+
+/**
+ * ipath_post_receive - post a receive on a QP
+ * @ibqp: the QP to post the receive on
+ * @wr: the WR to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ */
+static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct ipath_qp *qp = to_iqp(ibqp);
+ unsigned long flags;
+ int ret;
+
+ /* Check that state is OK to post receive. */
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK)) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ for (; wr; wr = wr->next) {
+ struct ipath_rwqe *wqe;
+ u32 next;
+ int i, j;
+
+ if (wr->num_sge > qp->r_rq.max_sge) {
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+ next = qp->r_rq.head + 1;
+ if (next >= qp->r_rq.size)
+ next = 0;
+ if (next == qp->r_rq.tail) {
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ wqe = get_rwqe_ptr(&qp->r_rq, qp->r_rq.head);
+ wqe->wr_id = wr->wr_id;
+ wqe->sg_list[0].mr = NULL;
+ wqe->sg_list[0].vaddr = NULL;
+ wqe->sg_list[0].length = 0;
+ wqe->sg_list[0].sge_length = 0;
+ wqe->length = 0;
+ for (i = 0, j = 0; i < wr->num_sge; i++) {
+ /* Check LKEY */
+ if (to_ipd(qp->ibqp.pd)->user &&
+ wr->sg_list[i].lkey == 0) {
+ spin_unlock_irqrestore(&qp->r_rq.lock,
+ flags);
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (wr->sg_list[i].length == 0)
+ continue;
+ if (!ipath_lkey_ok(
+ &to_idev(qp->ibqp.device)->lk_table,
+ &wqe->sg_list[j], &wr->sg_list[i],
+ IB_ACCESS_LOCAL_WRITE)) {
+ spin_unlock_irqrestore(&qp->r_rq.lock,
+ flags);
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+ wqe->length += wr->sg_list[i].length;
+ j++;
+ }
+ wqe->num_sge = j;
+ qp->r_rq.head = next;
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_qp_rcv - process an incoming packet on a QP
+ * @dev: the device the packet came on
+ * @hdr: the packet header
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP the packet came on
+ *
+ * This is called from ipath_ib_rcv() to process an incoming packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+static void ipath_qp_rcv(struct ipath_ibdev *dev,
+ struct ipath_ib_header *hdr, int has_grh,
+ void *data, u32 tlen, struct ipath_qp *qp)
+{
+ /* Check for valid receive state. */
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
+ dev->n_pkt_drops++;
+ return;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
+ break;
+
+ case IB_QPT_RC:
+ ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
+ break;
+
+ case IB_QPT_UC:
+ ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ipath_ib_rcv - process an incoming packet
+ * @arg: the device pointer
+ * @rhdr: the header of the packet
+ * @data: the packet data
+ * @tlen: the packet length
+ *
+ * This is called from ipath_kreceive() to process an incoming packet at
+ * interrupt level. Tlen is the length of the header + data + CRC in bytes.
+ */
+static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen)
+{
+ struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
+ struct ipath_ib_header *hdr = rhdr;
+ struct ipath_other_headers *ohdr;
+ struct ipath_qp *qp;
+ u32 qp_num;
+ int lnh;
+ u8 opcode;
+ u16 lid;
+
+ if (unlikely(dev == NULL))
+ goto bail;
+
+ if (unlikely(tlen < 24)) { /* LRH+BTH+CRC */
+ dev->rcv_errors++;
+ goto bail;
+ }
+
+ /* Check for a valid destination LID (see ch. 7.11.1). */
+ lid = be16_to_cpu(hdr->lrh[1]);
+ if (lid < IPS_MULTICAST_LID_BASE) {
+ lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
+ if (unlikely(lid != ipath_layer_get_lid(dev->dd))) {
+ dev->rcv_errors++;
+ goto bail;
+ }
+ }
+
+ /* Check for GRH */
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ if (lnh == IPS_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else if (lnh == IPS_LRH_GRH)
+ ohdr = &hdr->u.l.oth;
+ else {
+ dev->rcv_errors++;
+ goto bail;
+ }
+
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ dev->opstats[opcode].n_bytes += tlen;
+ dev->opstats[opcode].n_packets++;
+
+ /* Get the destination QP number. */
+ qp_num = be32_to_cpu(ohdr->bth[1]) & IPS_QPN_MASK;
+ if (qp_num == IPS_MULTICAST_QPN) {
+ struct ipath_mcast *mcast;
+ struct ipath_mcast_qp *p;
+
+ mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
+ if (mcast == NULL) {
+ dev->n_pkt_drops++;
+ goto bail;
+ }
+ dev->n_multicast_rcv++;
+ list_for_each_entry_rcu(p, &mcast->qp_list, list)
+ ipath_qp_rcv(dev, hdr, lnh == IPS_LRH_GRH, data,
+ tlen, p->qp);
+ /*
+ * Notify ipath_multicast_detach() if it is waiting for us
+ * to finish.
+ */
+ if (atomic_dec_return(&mcast->refcount) <= 1)
+ wake_up(&mcast->wait);
+ } else {
+ qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
+ if (qp) {
+ dev->n_unicast_rcv++;
+ ipath_qp_rcv(dev, hdr, lnh == IPS_LRH_GRH, data,
+ tlen, qp);
+ /*
+ * Notify ipath_destroy_qp() if it is waiting
+ * for us to finish.
+ */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ } else
+ dev->n_pkt_drops++;
+ }
+
+bail:;
+}
+
+/**
+ * ipath_ib_timer - verbs timer
+ * @arg: the device pointer
+ *
+ * This is called from ipath_do_rcv_timer() at interrupt level to check for
+ * QPs which need retransmits and to collect performance numbers.
+ */
+static void ipath_ib_timer(void *arg)
+{
+ struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
+ struct ipath_qp *resend = NULL;
+ struct ipath_qp *rnr = NULL;
+ struct list_head *last;
+ struct ipath_qp *qp;
+ unsigned long flags;
+
+ if (dev == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ /* Start filling the next pending queue. */
+ if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
+ dev->pending_index = 0;
+ /* Save any requests still in the new queue, they have timed out. */
+ last = &dev->pending[dev->pending_index];
+ while (!list_empty(last)) {
+ qp = list_entry(last->next, struct ipath_qp, timerwait);
+ if (last->next == LIST_POISON1 ||
+ last->next != &qp->timerwait ||
+ qp->timerwait.prev != last) {
+ INIT_LIST_HEAD(last);
+ } else {
+ list_del(&qp->timerwait);
+ qp->timerwait.prev = (struct list_head *) resend;
+ resend = qp;
+ atomic_inc(&qp->refcount);
+ }
+ }
+ last = &dev->rnrwait;
+ if (!list_empty(last)) {
+ qp = list_entry(last->next, struct ipath_qp, timerwait);
+ if (--qp->s_rnr_timeout == 0) {
+ do {
+ if (last->next == LIST_POISON1 ||
+ last->next != &qp->timerwait ||
+ qp->timerwait.prev != last) {
+ INIT_LIST_HEAD(last);
+ break;
+ }
+ list_del(&qp->timerwait);
+ qp->timerwait.prev =
+ (struct list_head *) rnr;
+ rnr = qp;
+ if (list_empty(last))
+ break;
+ qp = list_entry(last->next, struct ipath_qp,
+ timerwait);
+ } while (qp->s_rnr_timeout == 0);
+ }
+ }
+ /*
+ * We should only be in the started state if pma_sample_start != 0
+ */
+ if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
+ --dev->pma_sample_start == 0) {
+ dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
+ ipath_layer_snapshot_counters(dev->dd, &dev->ipath_sword,
+ &dev->ipath_rword,
+ &dev->ipath_spkts,
+ &dev->ipath_rpkts,
+ &dev->ipath_xmit_wait);
+ }
+ if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
+ if (dev->pma_sample_interval == 0) {
+ u64 ta, tb, tc, td, te;
+
+ dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ ipath_layer_snapshot_counters(dev->dd, &ta, &tb,
+ &tc, &td, &te);
+
+ dev->ipath_sword = ta - dev->ipath_sword;
+ dev->ipath_rword = tb - dev->ipath_rword;
+ dev->ipath_spkts = tc - dev->ipath_spkts;
+ dev->ipath_rpkts = td - dev->ipath_rpkts;
+ dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
+ } else
+ dev->pma_sample_interval--;
+ }
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+ /* XXX What if timer fires again while this is running? */
+ for (qp = resend; qp != NULL;
+ qp = (struct ipath_qp *) qp->timerwait.prev) {
+ struct ib_wc wc;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) {
+ dev->n_timeouts++;
+ ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Notify ipath_destroy_qp() if it is waiting. */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+ for (qp = rnr; qp != NULL;
+ qp = (struct ipath_qp *) qp->timerwait.prev)
+ tasklet_hi_schedule(&qp->s_task);
+}
+
+/**
+ * ipath_ib_piobufavail - callback when a PIO buffer is available
+ * @arg: the device pointer
+ *
+ * This is called from ipath_intr() at interrupt level when a PIO buffer is
+ * available after ipath_verbs_send() returned an error that no buffers were
+ * available. Return 0 if we consumed all the PIO buffers and we still have
+ * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
+ * return one).
+ */
+static int ipath_ib_piobufavail(void *arg)
+{
+ struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
+ struct ipath_qp *qp;
+ unsigned long flags;
+
+ if (dev == NULL)
+ goto bail;
+
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ while (!list_empty(&dev->piowait)) {
+ qp = list_entry(dev->piowait.next, struct ipath_qp,
+ piowait);
+ list_del(&qp->piowait);
+ tasklet_hi_schedule(&qp->s_task);
+ }
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+bail:
+ return 1;
+}
+
+static int ipath_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props)
+{
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ u32 vendor, boardrev, majrev, minrev;
+
+ memset(props, 0, sizeof(*props));
+
+ props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
+ IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
+ IB_DEVICE_SYS_IMAGE_GUID;
+ ipath_layer_query_device(dev->dd, &vendor, &boardrev,
+ &majrev, &minrev);
+ props->vendor_id = vendor;
+ props->vendor_part_id = boardrev;
+ props->hw_ver = boardrev << 16 | majrev << 8 | minrev;
+
+ props->sys_image_guid = dev->sys_image_guid;
+
+ props->max_mr_size = ~0ull;
+ props->max_qp = 0xffff;
+ props->max_qp_wr = 0xffff;
+ props->max_sge = 255;
+ props->max_cq = 0xffff;
+ props->max_cqe = 0xffff;
+ props->max_mr = 0xffff;
+ props->max_pd = 0xffff;
+ props->max_qp_rd_atom = 1;
+ props->max_qp_init_rd_atom = 1;
+ /* props->max_res_rd_atom */
+ props->max_srq = 0xffff;
+ props->max_srq_wr = 0xffff;
+ props->max_srq_sge = 255;
+ /* props->local_ca_ack_delay */
+ props->atomic_cap = IB_ATOMIC_HCA;
+ props->max_pkeys = ipath_layer_get_npkeys(dev->dd);
+ props->max_mcast_grp = 0xffff;
+ props->max_mcast_qp_attach = 0xffff;
+ props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
+ props->max_mcast_grp;
+
+ return 0;
+}
+
+const u8 ipath_cvt_physportstate[16] = {
+ [INFINIPATH_IBCS_LT_STATE_DISABLED] = 3,
+ [INFINIPATH_IBCS_LT_STATE_LINKUP] = 5,
+ [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = 2,
+ [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = 2,
+ [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = 1,
+ [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = 1,
+ [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] = 4,
+ [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] = 4,
+ [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] = 4,
+ [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = 4,
+ [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] = 6,
+ [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] = 6,
+ [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
+};
+
+static int ipath_query_port(struct ib_device *ibdev,
+ u8 port, struct ib_port_attr *props)
+{
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ enum ib_mtu mtu;
+ u16 lid = ipath_layer_get_lid(dev->dd);
+ u64 ibcstat;
+
+ memset(props, 0, sizeof(*props));
+ props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+ props->lmc = dev->mkeyprot_resv_lmc & 7;
+ props->sm_lid = dev->sm_lid;
+ props->sm_sl = dev->sm_sl;
+ ibcstat = ipath_layer_get_lastibcstat(dev->dd);
+ props->state = ((ibcstat >> 4) & 0x3) + 1;
+ /* See phys_state_show() */
+ props->phys_state = ipath_cvt_physportstate[
+ ipath_layer_get_lastibcstat(dev->dd) & 0xf];
+ props->port_cap_flags = dev->port_cap_flags;
+ props->gid_tbl_len = 1;
+ props->max_msg_sz = 4096;
+ props->pkey_tbl_len = ipath_layer_get_npkeys(dev->dd);
+ props->bad_pkey_cntr = ipath_layer_get_cr_errpkey(dev->dd) -
+ dev->n_pkey_violations;
+ props->qkey_viol_cntr = dev->qkey_violations;
+ props->active_width = IB_WIDTH_4X;
+ /* See rate_show() */
+ props->active_speed = 1; /* 2.5 Gb/s per lane (SDR) */
+ props->max_vl_num = 1; /* VLCap = VL0 */
+ props->init_type_reply = 0;
+
+ props->max_mtu = IB_MTU_4096;
+ switch (ipath_layer_get_ibmtu(dev->dd)) {
+ case 4096:
+ mtu = IB_MTU_4096;
+ break;
+ case 2048:
+ mtu = IB_MTU_2048;
+ break;
+ case 1024:
+ mtu = IB_MTU_1024;
+ break;
+ case 512:
+ mtu = IB_MTU_512;
+ break;
+ case 256:
+ mtu = IB_MTU_256;
+ break;
+ default:
+ mtu = IB_MTU_2048;
+ }
+ props->active_mtu = mtu;
+ props->subnet_timeout = dev->subnet_timeout;
+
+ return 0;
+}
+
+static int ipath_modify_device(struct ib_device *device,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify)
+{
+ int ret;
+
+ if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC)) {
+ ret = -EOPNOTSUPP;
+ goto bail;
+ }
+
+ if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
+ memcpy(device->node_desc, device_modify->node_desc, 64);
+
+ if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
+ to_idev(device)->sys_image_guid =
+ cpu_to_be64(device_modify->sys_image_guid);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static int ipath_modify_port(struct ib_device *ibdev,
+ u8 port, int port_modify_mask,
+ struct ib_port_modify *props)
+{
+ struct ipath_ibdev *dev = to_idev(ibdev);
+
+ dev->port_cap_flags |= props->set_port_cap_mask;
+ dev->port_cap_flags &= ~props->clr_port_cap_mask;
+ if (port_modify_mask & IB_PORT_SHUTDOWN)
+ ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
+ if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
+ dev->qkey_violations = 0;
+ return 0;
+}
+
+static int ipath_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid)
+{
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ int ret;
+
+ if (index >= 1) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ gid->global.subnet_prefix = dev->gid_prefix;
+ gid->global.interface_id = ipath_layer_get_guid(dev->dd);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct ipath_pd *pd;
+ struct ib_pd *ret;
+
+ pd = kmalloc(sizeof *pd, GFP_KERNEL);
+ if (!pd) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ /* ib_alloc_pd() will initialize pd->ibpd. */
+ pd->user = udata != NULL;
+
+ ret = &pd->ibpd;
+
+bail:
+ return ret;
+}
+
+static int ipath_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct ipath_pd *pd = to_ipd(ibpd);
+
+ kfree(pd);
+
+ return 0;
+}
+
+/**
+ * ipath_create_ah - create an address handle
+ * @pd: the protection domain
+ * @ah_attr: the attributes of the AH
+ *
+ * This may be called from interrupt context.
+ */
+static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ struct ipath_ah *ah;
+ struct ib_ah *ret;
+
+ /* A multicast address requires a GRH (see ch. 8.4.1). */
+ if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
+ ah_attr->dlid != IPS_PERMISSIVE_LID &&
+ !(ah_attr->ah_flags & IB_AH_GRH)) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ ah = kmalloc(sizeof *ah, GFP_ATOMIC);
+ if (!ah) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ /* ib_create_ah() will initialize ah->ibah. */
+ ah->attr = *ah_attr;
+
+ ret = &ah->ibah;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_destroy_ah - destroy an address handle
+ * @ibah: the AH to destroy
+ *
+ * This may be called from interrupt context.
+ */
+static int ipath_destroy_ah(struct ib_ah *ibah)
+{
+ struct ipath_ah *ah = to_iah(ibah);
+
+ kfree(ah);
+
+ return 0;
+}
+
+static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct ipath_ah *ah = to_iah(ibah);
+
+ *ah_attr = ah->attr;
+
+ return 0;
+}
+
+static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ struct ipath_ibdev *dev = to_idev(ibdev);
+ int ret;
+
+ if (index >= ipath_layer_get_npkeys(dev->dd)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ *pkey = ipath_layer_get_pkey(dev->dd, index);
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_alloc_ucontext - allocate a ucontext
+ * @ibdev: the infiniband device
+ * @udata: not used by the InfiniPath driver
+ */
+static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct ipath_ucontext *context;
+ struct ib_ucontext *ret;
+
+ context = kmalloc(sizeof *context, GFP_KERNEL);
+ if (!context) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ ret = &context->ibucontext;
+
+bail:
+ return ret;
+}
+
+static int ipath_dealloc_ucontext(struct ib_ucontext *context)
+{
+ kfree(to_iucontext(context));
+ return 0;
+}
+
+static int ipath_verbs_register_sysfs(struct ib_device *dev);
+
+/**
+ * ipath_register_ib_device - register our device with the infiniband core
+ * @unit: the device number to register
+ * @dd: the device data structure
+ * Return the allocated ipath_ibdev pointer or NULL on error.
+ */
+static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
+{
+ struct ipath_ibdev *idev;
+ struct ib_device *dev;
+ int ret;
+
+ idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
+ if (idev == NULL)
+ goto bail;
+
+ dev = &idev->ibdev;
+
+ /* Only need to initialize non-zero fields. */
+ spin_lock_init(&idev->qp_table.lock);
+ spin_lock_init(&idev->lk_table.lock);
+ idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+ /* Set the prefix to the default value (see ch. 4.1.1) */
+ idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
+
+ ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
+ if (ret)
+ goto err_qp;
+
+ /*
+ * The top ib_ipath_lkey_table_size bits are used to index the
+ * table. The lower 8 bits can be owned by the user (copied from
+ * the LKEY). The remaining bits act as a generation number or tag.
+ */
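+ /*
+ * With the default lkey_table_size of 12 this allocates a 4096
+ * entry table.
+ */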
+ idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
+ idev->lk_table.table = kzalloc(idev->lk_table.max *
+ sizeof(*idev->lk_table.table),
+ GFP_KERNEL);
+ if (idev->lk_table.table == NULL) {
+ ret = -ENOMEM;
+ goto err_lk;
+ }
+ spin_lock_init(&idev->pending_lock);
+ INIT_LIST_HEAD(&idev->pending[0]);
+ INIT_LIST_HEAD(&idev->pending[1]);
+ INIT_LIST_HEAD(&idev->pending[2]);
+ INIT_LIST_HEAD(&idev->piowait);
+ INIT_LIST_HEAD(&idev->rnrwait);
+ idev->pending_index = 0;
+ idev->port_cap_flags =
+ IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
+ idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
+ idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
+ idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
+ idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
+ idev->pma_counter_select[5] = IB_PMA_PORT_XMIT_WAIT;
+ idev->link_width_enabled = 3; /* 1x or 4x */
+
+ /*
+ * The system image GUID is supposed to be the same for all
+ * IB HCAs in a single system but since there can be other
+ * device types in the system, we can't be sure this is unique.
+ */
+ if (!sys_image_guid)
+ sys_image_guid = ipath_layer_get_guid(dd);
+ idev->sys_image_guid = sys_image_guid;
+ idev->ib_unit = unit;
+ idev->dd = dd;
+
+ strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
+ dev->node_guid = ipath_layer_get_guid(dd);
+ dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
+ dev->uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+ dev->node_type = IB_NODE_CA;
+ dev->phys_port_cnt = 1;
+ dev->dma_device = ipath_layer_get_device(dd);
+ dev->class_dev.dev = dev->dma_device;
+ dev->query_device = ipath_query_device;
+ dev->modify_device = ipath_modify_device;
+ dev->query_port = ipath_query_port;
+ dev->modify_port = ipath_modify_port;
+ dev->query_pkey = ipath_query_pkey;
+ dev->query_gid = ipath_query_gid;
+ dev->alloc_ucontext = ipath_alloc_ucontext;
+ dev->dealloc_ucontext = ipath_dealloc_ucontext;
+ dev->alloc_pd = ipath_alloc_pd;
+ dev->dealloc_pd = ipath_dealloc_pd;
+ dev->create_ah = ipath_create_ah;
+ dev->destroy_ah = ipath_destroy_ah;
+ dev->query_ah = ipath_query_ah;
+ dev->create_srq = ipath_create_srq;
+ dev->modify_srq = ipath_modify_srq;
+ dev->query_srq = ipath_query_srq;
+ dev->destroy_srq = ipath_destroy_srq;
+ dev->create_qp = ipath_create_qp;
+ dev->modify_qp = ipath_modify_qp;
+ dev->query_qp = ipath_query_qp;
+ dev->destroy_qp = ipath_destroy_qp;
+ dev->post_send = ipath_post_send;
+ dev->post_recv = ipath_post_receive;
+ dev->post_srq_recv = ipath_post_srq_receive;
+ dev->create_cq = ipath_create_cq;
+ dev->destroy_cq = ipath_destroy_cq;
+ dev->resize_cq = ipath_resize_cq;
+ dev->poll_cq = ipath_poll_cq;
+ dev->req_notify_cq = ipath_req_notify_cq;
+ dev->get_dma_mr = ipath_get_dma_mr;
+ dev->reg_phys_mr = ipath_reg_phys_mr;
+ dev->reg_user_mr = ipath_reg_user_mr;
+ dev->dereg_mr = ipath_dereg_mr;
+ dev->alloc_fmr = ipath_alloc_fmr;
+ dev->map_phys_fmr = ipath_map_phys_fmr;
+ dev->unmap_fmr = ipath_unmap_fmr;
+ dev->dealloc_fmr = ipath_dealloc_fmr;
+ dev->attach_mcast = ipath_multicast_attach;
+ dev->detach_mcast = ipath_multicast_detach;
+ dev->process_mad = ipath_process_mad;
+
+ snprintf(dev->node_desc, sizeof(dev->node_desc),
+ IPATH_IDSTR " %s kernel_SMA", system_utsname.nodename);
+
+ ret = ib_register_device(dev);
+ if (ret)
+ goto err_reg;
+
+ if (ipath_verbs_register_sysfs(dev))
+ goto err_class;
+
+ ipath_layer_enable_timer(dd);
+
+ goto bail;
+
+err_class:
+ ib_unregister_device(dev);
+err_reg:
+ kfree(idev->lk_table.table);
+err_lk:
+ kfree(idev->qp_table.table);
+err_qp:
+ ib_dealloc_device(dev);
+ _VERBS_ERROR("ib_ipath%d cannot register verbs (%d)!\n",
+ unit, -ret);
+ idev = NULL;
+
+bail:
+ return idev;
+}
+
+static void ipath_unregister_ib_device(void *arg)
+{
+ struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
+ struct ib_device *ibdev = &dev->ibdev;
+
+ ipath_layer_disable_timer(dev->dd);
+
+ ib_unregister_device(ibdev);
+
+ if (!list_empty(&dev->pending[0]) ||
+ !list_empty(&dev->pending[1]) ||
+ !list_empty(&dev->pending[2]))
+ _VERBS_ERROR("ipath%d pending list not empty!\n",
+ dev->ib_unit);
+ if (!list_empty(&dev->piowait))
+ _VERBS_ERROR("ipath%d piowait list not empty!\n",
+ dev->ib_unit);
+ if (!list_empty(&dev->rnrwait))
+ _VERBS_ERROR("ipath%d rnrwait list not empty!\n",
+ dev->ib_unit);
+ if (!ipath_mcast_tree_empty())
+ _VERBS_ERROR("ipath%d multicast table memory leak!\n",
+ dev->ib_unit);
+ /*
+ * Note that ipath_unregister_ib_device() can be called before all
+ * the QPs are destroyed!
+ */
+ ipath_free_all_qps(&dev->qp_table);
+ kfree(dev->qp_table.table);
+ kfree(dev->lk_table.table);
+ ib_dealloc_device(ibdev);
+}
+
+int __init ipath_verbs_init(void)
+{
+ return ipath_verbs_register(ipath_register_ib_device,
+ ipath_unregister_ib_device,
+ ipath_ib_piobufavail, ipath_ib_rcv,
+ ipath_ib_timer);
+}
+
+void __exit ipath_verbs_cleanup(void)
+{
+ ipath_verbs_unregister();
+}
+
+static ssize_t show_rev(struct class_device *cdev, char *buf)
+{
+ struct ipath_ibdev *dev =
+ container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
+ int vendor, boardrev, majrev, minrev;
+
+ ipath_layer_query_device(dev->dd, &vendor, &boardrev,
+ &majrev, &minrev);
+ return sprintf(buf, "%d.%d\n", majrev, minrev);
+}
+
+static ssize_t show_hca(struct class_device *cdev, char *buf)
+{
+ struct ipath_ibdev *dev =
+ container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
+ int ret;
+
+ ret = ipath_layer_get_boardname(dev->dd, buf, 128);
+ if (ret < 0)
+ goto bail;
+ strcat(buf, "\n");
+ ret = strlen(buf);
+
+bail:
+ return ret;
+}
+
+static ssize_t show_stats(struct class_device *cdev, char *buf)
+{
+ struct ipath_ibdev *dev =
+ container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
+ int i;
+ int len;
+
+ len = sprintf(buf,
+ "RC resends %d\n"
+ "RC QACKs %d\n"
+ "RC ACKs %d\n"
+ "RC SEQ NAKs %d\n"
+ "RC RDMA seq %d\n"
+ "RC RNR NAKs %d\n"
+ "RC OTH NAKs %d\n"
+ "RC timeouts %d\n"
+ "RC RDMA dup %d\n"
+ "piobuf wait %d\n"
+ "no piobuf %d\n"
+ "PKT drops %d\n"
+ "WQE errs %d\n",
+ dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
+ dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
+ dev->n_other_naks, dev->n_timeouts,
+ dev->n_rdma_dup_busy, dev->n_piowait,
+ dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
+ for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
+ const struct ipath_opcode_stats *si = &dev->opstats[i];
+
+ if (!si->n_packets && !si->n_bytes)
+ continue;
+ len += sprintf(buf + len, "%02x %llu/%llu\n", i,
+ (unsigned long long) si->n_packets,
+ (unsigned long long) si->n_bytes);
+ }
+ return len;
+}
+
+static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
+static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
+
+static struct class_device_attribute *ipath_class_attributes[] = {
+ &class_device_attr_hw_rev,
+ &class_device_attr_hca_type,
+ &class_device_attr_board_id,
+ &class_device_attr_stats
+};
+
+static int ipath_verbs_register_sysfs(struct ib_device *dev)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
+ if (class_device_create_file(&dev->class_dev,
+ ipath_class_attributes[i])) {
+ ret = 1;
+ goto bail;
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+module_init(ipath_verbs_init);
+module_exit(ipath_verbs_cleanup);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
new file mode 100644
index 0000000000000..b824632b2a8c4
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -0,0 +1,697 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IPATH_VERBS_H
+#define IPATH_VERBS_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <rdma/ib_pack.h>
+
+#include "ipath_layer.h"
+#include "verbs_debug.h"
+
+#define QPN_MAX (1 << 24)
+#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define IPATH_UVERBS_ABI_VERSION 1
+
+/*
+ * Define an ib_cq_notify value that is not valid so we know when CQ
+ * notifications are armed.
+ */
+#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
+
+#define IB_RNR_NAK 0x20
+#define IB_NAK_PSN_ERROR 0x60
+#define IB_NAK_INVALID_REQUEST 0x61
+#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
+#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
+#define IB_NAK_INVALID_RD_REQUEST 0x64
+
+#define IPATH_POST_SEND_OK 0x01
+#define IPATH_POST_RECV_OK 0x02
+#define IPATH_PROCESS_RECV_OK 0x04
+#define IPATH_PROCESS_SEND_OK 0x08
+
+/* IB Performance Manager status values */
+#define IB_PMA_SAMPLE_STATUS_DONE 0x00
+#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
+#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
+
+/* Mandatory IB performance counter select values. */
+#define IB_PMA_PORT_XMIT_DATA __constant_htons(0x0001)
+#define IB_PMA_PORT_RCV_DATA __constant_htons(0x0002)
+#define IB_PMA_PORT_XMIT_PKTS __constant_htons(0x0003)
+#define IB_PMA_PORT_RCV_PKTS __constant_htons(0x0004)
+#define IB_PMA_PORT_XMIT_WAIT __constant_htons(0x0005)
+
+struct ib_reth {
+ __be64 vaddr;
+ __be32 rkey;
+ __be32 length;
+} __attribute__ ((packed));
+
+struct ib_atomic_eth {
+ __be64 vaddr;
+ __be32 rkey;
+ __be64 swap_data;
+ __be64 compare_data;
+} __attribute__ ((packed));
+
+struct ipath_other_headers {
+ __be32 bth[3];
+ union {
+ struct {
+ __be32 deth[2];
+ __be32 imm_data;
+ } ud;
+ struct {
+ struct ib_reth reth;
+ __be32 imm_data;
+ } rc;
+ struct {
+ __be32 aeth;
+ __be64 atomic_ack_eth;
+ } at;
+ __be32 imm_data;
+ __be32 aeth;
+ struct ib_atomic_eth atomic_eth;
+ } u;
+} __attribute__ ((packed));
+
+/*
+ * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
+ * long (72 w/ imm_data). Only the first 56 bytes of the IB header
+ * will be in the eager header buffer. The remaining 12 or 16 bytes
+ * are in the data buffer.
+ */
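+/*
+ * Worst case arithmetic: LRH (8) + GRH (40) + BTH (12) + DETH (8) =
+ * 68 bytes; a 56 byte eager buffer holds the LRH, GRH and the first
+ * two BTH words, leaving the last BTH word plus the DETH (12 bytes,
+ * or 16 with a 4 byte immediate) in the data buffer.
+ */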
+struct ipath_ib_header {
+ __be16 lrh[4];
+ union {
+ struct {
+ struct ib_grh grh;
+ struct ipath_other_headers oth;
+ } l;
+ struct ipath_other_headers oth;
+ } u;
+} __attribute__ ((packed));
+
+/*
+ * There is one struct ipath_mcast for each multicast GID.
+ * All attached QPs are then stored as a list of
+ * struct ipath_mcast_qp.
+ */
+struct ipath_mcast_qp {
+ struct list_head list;
+ struct ipath_qp *qp;
+};
+
+struct ipath_mcast {
+ struct rb_node rb_node;
+ union ib_gid mgid;
+ struct list_head qp_list;
+ wait_queue_head_t wait;
+ atomic_t refcount;
+};
+
+/* Memory region */
+struct ipath_mr {
+ struct ib_mr ibmr;
+ struct ipath_mregion mr; /* must be last */
+};
+
+/* Fast memory region */
+struct ipath_fmr {
+ struct ib_fmr ibfmr;
+ u8 page_shift;
+ struct ipath_mregion mr; /* must be last */
+};
+
+/* Protection domain */
+struct ipath_pd {
+ struct ib_pd ibpd;
+ int user; /* non-zero if created from user space */
+};
+
+/* Address Handle */
+struct ipath_ah {
+ struct ib_ah ibah;
+ struct ib_ah_attr attr;
+};
+
+/*
+ * Quick description of our CQ/QP locking scheme:
+ *
+ * We have one global lock that protects dev->cq/qp_table. Each
+ * struct ipath_cq/qp also has its own lock. An individual qp lock
+ * may be taken inside of an individual cq lock. Both cqs attached to
+ * a qp may be locked, with the send cq locked first. No other
+ * nesting should be done.
+ *
+ * Each struct ipath_cq/qp also has an atomic_t ref count. The
+ * pointer from the cq/qp_table to the struct counts as one reference.
+ * This reference also is good for access through the consumer API, so
+ * modifying the CQ/QP etc doesn't need to take another reference.
+ * Access because of a completion being polled does need a reference.
+ *
+ * Finally, each struct ipath_cq/qp has a wait_queue_head_t for the
+ * destroy function to sleep on.
+ *
+ * This means that access from the consumer API requires nothing but
+ * taking the struct's lock.
+ *
+ * Access because of a completion event should go as follows:
+ * - lock cq/qp_table and look up struct
+ * - increment ref count in struct
+ * - drop cq/qp_table lock
+ * - lock struct, do your thing, and unlock struct
+ * - decrement ref count; if zero, wake up waiters
+ *
+ * To destroy a CQ/QP, we can do the following:
+ * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
+ * - decrement ref count
+ * - wait_event until ref count is zero
+ *
+ * It is the consumer's responsibility to make sure that no QP
+ * operations (WQE posting or state modification) are pending when the
+ * QP is destroyed. Also, the consumer must make sure that calls to
+ * qp_modify are serialized.
+ *
+ * Possible optimizations (wait for profile data to see if/where we
+ * have locks bouncing between CPUs):
+ * - split cq/qp table lock into n separate (cache-aligned) locks,
+ * indexed (say) by the page in the table
+ */
+
+struct ipath_cq {
+ struct ib_cq ibcq;
+ struct tasklet_struct comptask;
+ spinlock_t lock;
+ u8 notify;
+ u8 triggered;
+ u32 head; /* new records added to the head */
+ u32 tail; /* poll_cq() reads from here. */
+ struct ib_wc *queue; /* this is actually ibcq.cqe + 1 */
+};
+
+/*
+ * Send work request queue entry.
+ * The size of the sg_list is determined when the QP is created and stored
+ * in qp->s_max_sge.
+ */
+struct ipath_swqe {
+ struct ib_send_wr wr; /* don't use wr.sg_list */
+ u32 psn; /* first packet sequence number */
+ u32 lpsn; /* last packet sequence number */
+ u32 ssn; /* send sequence number */
+ u32 length; /* total length of data in sg_list */
+ struct ipath_sge sg_list[0];
+};
+
+/*
+ * Receive work request queue entry.
+ * The size of the sg_list is determined when the QP is created and stored
+ * in qp->r_max_sge.
+ */
+struct ipath_rwqe {
+ u64 wr_id;
+ u32 length; /* total length of data in sg_list */
+ u8 num_sge;
+ struct ipath_sge sg_list[0];
+};
+
+struct ipath_rq {
+ spinlock_t lock;
+ u32 head; /* new work requests posted to the head */
+ u32 tail; /* receives pull requests from here. */
+ u32 size; /* size of RWQE array */
+ u8 max_sge;
+ struct ipath_rwqe *wq; /* RWQE array */
+};
+
+struct ipath_srq {
+ struct ib_srq ibsrq;
+ struct ipath_rq rq;
+ /* send signal when number of RWQEs < limit */
+ u32 limit;
+};
+
+/*
+ * Variables prefixed with s_ are for the requester (sender).
+ * Variables prefixed with r_ are for the responder (receiver).
+ * Variables prefixed with ack_ are for responder replies.
+ *
+ * Common variables are protected by both r_rq.lock and s_lock, taken in
+ * that order, which only happens in modify_qp() or when changing the QP
+ * 'state'.
+ */
+struct ipath_qp {
+ struct ib_qp ibqp;
+ struct ipath_qp *next; /* link list for QPN hash table */
+ struct list_head piowait; /* link for wait PIO buf */
+ struct list_head timerwait; /* link for waiting for timeouts */
+ struct ib_ah_attr remote_ah_attr;
+ struct ipath_ib_header s_hdr; /* next packet header to send */
+ atomic_t refcount;
+ wait_queue_head_t wait;
+ struct tasklet_struct s_task;
+ struct ipath_sge_state *s_cur_sge;
+ struct ipath_sge_state s_sge; /* current send request data */
+ /* current RDMA read send data */
+ struct ipath_sge_state s_rdma_sge;
+ struct ipath_sge_state r_sge; /* current receive data */
+ spinlock_t s_lock;
+ unsigned long s_flags;
+ u32 s_hdrwords; /* size of s_hdr in 32 bit words */
+ u32 s_cur_size; /* size of send packet in bytes */
+ u32 s_len; /* total length of s_sge */
+ u32 s_rdma_len; /* total length of s_rdma_sge */
+ u32 s_next_psn; /* PSN for next request */
+ u32 s_last_psn; /* last response PSN processed */
+ u32 s_psn; /* current packet sequence number */
+ u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
+ u32 s_ack_psn; /* PSN for next ACK or RDMA_READ */
+ u64 s_ack_atomic; /* data for atomic ACK */
+ u64 r_wr_id; /* ID for current receive WQE */
+ u64 r_atomic_data; /* data for last atomic op */
+ u32 r_atomic_psn; /* PSN of last atomic op */
+ u32 r_len; /* total length of r_sge */
+ u32 r_rcv_len; /* receive data len processed */
+ u32 r_psn; /* expected rcv packet sequence number */
+ u8 state; /* QP state */
+ u8 s_state; /* opcode of last packet sent */
+ u8 s_ack_state; /* opcode of packet to ACK */
+ u8 s_nak_state; /* non-zero if NAK is pending */
+ u8 r_state; /* opcode of last packet received */
+ u8 r_reuse_sge; /* for UC receive errors */
+ u8 r_sge_inx; /* current index into sg_list */
+ u8 s_max_sge; /* size of s_wq->sg_list */
+ u8 qp_access_flags;
+ u8 s_retry_cnt; /* number of times to retry */
+ u8 s_rnr_retry_cnt;
+ u8 s_min_rnr_timer;
+ u8 s_retry; /* requester retry counter */
+ u8 s_rnr_retry; /* requester RNR retry counter */
+ u8 s_pkey_index; /* PKEY index to use */
+ enum ib_mtu path_mtu;
+ atomic_t msn; /* message sequence number */
+ u32 remote_qpn;
+ u32 qkey; /* QKEY for this QP (for UD or RD) */
+ u32 s_size; /* send work queue size */
+ u32 s_head; /* new entries added here */
+ u32 s_tail; /* next entry to process */
+ u32 s_cur; /* current work queue entry */
+ u32 s_last; /* last un-ACK'ed entry */
+ u32 s_ssn; /* SSN of tail entry */
+ u32 s_lsn; /* limit sequence number (credit) */
+ struct ipath_swqe *s_wq; /* send work queue */
+ struct ipath_rq r_rq; /* receive work queue */
+};
+
+/*
+ * Bit definitions for s_flags.
+ */
+#define IPATH_S_BUSY 0
+#define IPATH_S_SIGNAL_REQ_WR 1
+
+/*
+ * Since struct ipath_swqe is not a fixed size, we can't simply index into
+ * struct ipath_qp.s_wq. This function does the array index computation.
+ */
+static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
+ unsigned n)
+{
+ return (struct ipath_swqe *)((char *)qp->s_wq +
+ (sizeof(struct ipath_swqe) +
+ qp->s_max_sge *
+ sizeof(struct ipath_sge)) * n);
+}
+
+/*
+ * Since struct ipath_rwqe is not a fixed size, we can't simply index into
+ * struct ipath_rq.wq. This function does the array index computation.
+ */
+static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
+ unsigned n)
+{
+ return (struct ipath_rwqe *)
+ ((char *) rq->wq +
+ (sizeof(struct ipath_rwqe) +
+ rq->max_sge * sizeof(struct ipath_sge)) * n);
+}
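+
+/*
+ * Illustrative sketch (editor's addition, not part of the driver): walking
+ * the RWQE ring with get_rwqe_ptr(). The wrap-around handling and the
+ * assumption that a caller holds rq->lock are inferred from the comments
+ * in struct ipath_rq above.
+ */
+static inline u32 example_count_posted_sges(struct ipath_rq *rq)
+{
+ u32 n = 0;
+ u32 i = rq->tail;
+
+ while (i != rq->head) {
+ struct ipath_rwqe *wqe = get_rwqe_ptr(rq, i);
+
+ n += wqe->num_sge;
+ if (++i >= rq->size)
+ i = 0;
+ }
+ return n;
+}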
+
+/*
+ * QPN-map pages start out as NULL, they get allocated upon
+ * first use and are never deallocated. This way,
+ * large bitmaps are not allocated unless large numbers of QPs are used.
+ */
+struct qpn_map {
+ atomic_t n_free;
+ void *page;
+};
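+
+/*
+ * Illustrative sketch (editor's addition, not part of the driver): the
+ * allocate-on-first-use idea from the comment above. The real allocator
+ * presumably also initializes n_free and copes with concurrent callers;
+ * get_zeroed_page() is assumed to be available via the usual mm headers.
+ */
+static inline void *example_get_map_page(struct qpn_map *map)
+{
+ if (!map->page)
+ map->page = (void *) get_zeroed_page(GFP_KERNEL);
+ return map->page;
+}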
+
+struct ipath_qp_table {
+ spinlock_t lock;
+ u32 last; /* last QP number allocated */
+ u32 max; /* size of the hash table */
+ u32 nmaps; /* size of the map table */
+ struct ipath_qp **table;
+ /* bit map of free numbers */
+ struct qpn_map map[QPNMAP_ENTRIES];
+};
+
+struct ipath_lkey_table {
+ spinlock_t lock;
+ u32 next; /* next unused index (speeds search) */
+ u32 gen; /* generation count */
+ u32 max; /* size of the table */
+ struct ipath_mregion **table;
+};
+
+struct ipath_opcode_stats {
+ u64 n_packets; /* number of packets */
+ u64 n_bytes; /* total number of bytes */
+};
+
+struct ipath_ibdev {
+ struct ib_device ibdev;
+ struct list_head dev_list;
+ struct ipath_devdata *dd;
+ int ib_unit; /* This is the device number */
+ u16 sm_lid; /* in host order */
+ u8 sm_sl;
+ u8 mkeyprot_resv_lmc;
+ /* non-zero when timer is set */
+ unsigned long mkey_lease_timeout;
+
+ /* The following fields are really per port. */
+ struct ipath_qp_table qp_table;
+ struct ipath_lkey_table lk_table;
+ struct list_head pending[3]; /* FIFO of QPs waiting for ACKs */
+ struct list_head piowait; /* list for wait PIO buf */
+ /* list of QPs waiting for RNR timer */
+ struct list_head rnrwait;
+ spinlock_t pending_lock;
+ __be64 sys_image_guid; /* in network order */
+ __be64 gid_prefix; /* in network order */
+ __be64 mkey;
+ u64 ipath_sword; /* total dwords sent (sample result) */
+ u64 ipath_rword; /* total dwords received (sample result) */
+ u64 ipath_spkts; /* total packets sent (sample result) */
+ u64 ipath_rpkts; /* total packets received (sample result) */
+ /* # of ticks no data sent (sample result) */
+ u64 ipath_xmit_wait;
+ u64 rcv_errors; /* # of packets with SW detected rcv errs */
+ u64 n_unicast_xmit; /* total unicast packets sent */
+ u64 n_unicast_rcv; /* total unicast packets received */
+ u64 n_multicast_xmit; /* total multicast packets sent */
+ u64 n_multicast_rcv; /* total multicast packets received */
+ u64 n_symbol_error_counter; /* starting count for PMA */
+ u64 n_link_error_recovery_counter; /* starting count for PMA */
+ u64 n_link_downed_counter; /* starting count for PMA */
+ u64 n_port_rcv_errors; /* starting count for PMA */
+ u64 n_port_rcv_remphys_errors; /* starting count for PMA */
+ u64 n_port_xmit_discards; /* starting count for PMA */
+ u64 n_port_xmit_data; /* starting count for PMA */
+ u64 n_port_rcv_data; /* starting count for PMA */
+ u64 n_port_xmit_packets; /* starting count for PMA */
+ u64 n_port_rcv_packets; /* starting count for PMA */
+ u32 n_pkey_violations; /* starting count for PMA */
+ u32 n_rc_resends;
+ u32 n_rc_acks;
+ u32 n_rc_qacks;
+ u32 n_seq_naks;
+ u32 n_rdma_seq;
+ u32 n_rnr_naks;
+ u32 n_other_naks;
+ u32 n_timeouts;
+ u32 n_pkt_drops;
+ u32 n_wqe_errs;
+ u32 n_rdma_dup_busy;
+ u32 n_piowait;
+ u32 n_no_piobuf;
+ u32 port_cap_flags;
+ u32 pma_sample_start;
+ u32 pma_sample_interval;
+ __be16 pma_counter_select[5];
+ u16 pma_tag;
+ u16 qkey_violations;
+ u16 mkey_violations;
+ u16 mkey_lease_period;
+ u16 pending_index; /* which pending queue is active */
+ u8 pma_sample_status;
+ u8 subnet_timeout;
+ u8 link_width_enabled;
+ u8 vl_high_limit;
+ struct ipath_opcode_stats opstats[128];
+};
+
+struct ipath_ucontext {
+ struct ib_ucontext ibucontext;
+};
+
+static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct ipath_mr, ibmr);
+}
+
+static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct ipath_fmr, ibfmr);
+}
+
+static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct ipath_pd, ibpd);
+}
+
+static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct ipath_ah, ibah);
+}
+
+static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct ipath_cq, ibcq);
+}
+
+static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct ipath_srq, ibsrq);
+}
+
+static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct ipath_qp, ibqp);
+}
+
+static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct ipath_ibdev, ibdev);
+}
+
+int ipath_process_mad(struct ib_device *ibdev,
+ int mad_flags,
+ u8 port_num,
+ struct ib_wc *in_wc,
+ struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad);
+
+static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
+ *ibucontext)
+{
+ return container_of(ibucontext, struct ipath_ucontext, ibucontext);
+}
+
+/*
+ * Compare the lower 24 bits of the two values.
+ * Returns an integer less than, equal to, or greater than zero.
+ */
+static inline int ipath_cmp24(u32 a, u32 b)
+{
+ return (((int) a) - ((int) b)) << 8;
+}
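+
+/*
+ * Editor's note (worked example): the shift moves bit 23 of the 24-bit
+ * difference into the sign bit, so the comparison is circular. For
+ * instance, ipath_cmp24(0x000002, 0xFFFFFF) is ((2 - 0xFFFFFF) << 8) =
+ * 0x300 > 0, i.e. PSN 0x000002 is treated as 3 ahead of 0xFFFFFF across
+ * the wrap, where a plain subtraction would call it far behind. (This
+ * relies on the usual two's-complement wrap of the shift.)
+ */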
+
+struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
+
+int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int ipath_mcast_tree_empty(void);
+
+__be32 ipath_compute_aeth(struct ipath_qp *qp);
+
+struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);
+
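+/*
+ * Illustrative sketch (editor's addition, not part of the driver): the
+ * completion-event access pattern described in the locking comment above,
+ * assuming ipath_lookup_qpn() returns the QP with its reference count
+ * already incremented, as that comment implies.
+ */
+static inline void example_qp_event_access(struct ipath_qp_table *qpt,
+ u32 qpn)
+{
+ struct ipath_qp *qp = ipath_lookup_qpn(qpt, qpn); /* takes a reference */
+ unsigned long flags;
+
+ if (!qp)
+ return;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ /* ... do the per-QP work here ... */
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait); /* let a pending destroy proceed */
+}
+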
+struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+
+int ipath_destroy_qp(struct ib_qp *ibqp);
+
+int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask);
+
+int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr);
+
+void ipath_free_all_qps(struct ipath_qp_table *qpt);
+
+int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
+
+void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
+
+void ipath_error_qp(struct ipath_qp *qp);
+
+void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
+
+void ipath_do_rc_send(unsigned long data);
+
+void ipath_do_uc_send(unsigned long data);
+
+void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
+
+int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
+ u32 len, u64 vaddr, u32 rkey, int acc);
+
+int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
+ struct ib_sge *sge, int acc);
+
+void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
+
+void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
+
+int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr);
+
+void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
+
+void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
+
+void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc);
+
+void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
+ u32 length, struct ib_send_wr *wr, struct ib_wc *wc);
+
+int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
+
+void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
+
+int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
+ struct ipath_mregion *mr);
+
+void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);
+
+int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
+ struct ib_sge *sge, int acc);
+
+int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
+ u32 len, u64 vaddr, u32 rkey, int acc);
+
+int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+
+struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+
+int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask);
+
+int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+
+int ipath_destroy_srq(struct ib_srq *ibsrq);
+
+void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
+
+int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+
+struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+
+int ipath_destroy_cq(struct ib_cq *ibcq);
+
+int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
+
+int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+
+struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);
+
+struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start);
+
+struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
+ int mr_access_flags,
+ struct ib_udata *udata);
+
+int ipath_dereg_mr(struct ib_mr *ibmr);
+
+struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr);
+
+int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
+ int list_len, u64 iova);
+
+int ipath_unmap_fmr(struct list_head *fmr_list);
+
+int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
+
+void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
+
+void ipath_insert_rnr_queue(struct ipath_qp *qp);
+
+int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
+
+void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc);
+
+extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
+
+extern const u8 ipath_cvt_physportstate[];
+
+extern const int ib_ipath_state_ops[];
+
+extern unsigned int ib_ipath_lkey_table_size;
+
+extern const u32 ib_ipath_rnr_table[];
+
+#endif /* IPATH_VERBS_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
new file mode 100644
index 0000000000000..10b31d2c4f209
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+
+#include "ipath_verbs.h"
+
+/*
+ * Global table of GID to attached QPs.
+ * The table is global to all ipath devices since a send from one QP/device
+ * needs to be locally routed to any locally attached QPs on the same
+ * or different device.
+ */
+static struct rb_root mcast_tree;
+static DEFINE_SPINLOCK(mcast_lock);
+
+/**
+ * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
+ * @qp: the QP to link
+ */
+static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
+{
+ struct ipath_mcast_qp *mqp;
+
+ mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
+ if (!mqp)
+ goto bail;
+
+ mqp->qp = qp;
+ atomic_inc(&qp->refcount);
+
+bail:
+ return mqp;
+}
+
+static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
+{
+ struct ipath_qp *qp = mqp->qp;
+
+ /* Notify ipath_destroy_qp() if it is waiting. */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+
+ kfree(mqp);
+}
+
+/**
+ * ipath_mcast_alloc - allocate the multicast GID structure
+ * @mgid: the multicast GID
+ *
+ * A list of QPs will be attached to this structure.
+ */
+static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
+{
+ struct ipath_mcast *mcast;
+
+ mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+ if (!mcast)
+ goto bail;
+
+ mcast->mgid = *mgid;
+ INIT_LIST_HEAD(&mcast->qp_list);
+ init_waitqueue_head(&mcast->wait);
+ atomic_set(&mcast->refcount, 0);
+
+bail:
+ return mcast;
+}
+
+static void ipath_mcast_free(struct ipath_mcast *mcast)
+{
+ struct ipath_mcast_qp *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
+ ipath_mcast_qp_free(p);
+
+ kfree(mcast);
+}
+
+/**
+ * ipath_mcast_find - search the global table for the given multicast GID
+ * @mgid: the multicast GID to search for
+ *
+ * Returns NULL if not found.
+ *
+ * The caller is responsible for decrementing the reference count if found.
+ */
+struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
+{
+ struct rb_node *n;
+ unsigned long flags;
+ struct ipath_mcast *mcast;
+
+ spin_lock_irqsave(&mcast_lock, flags);
+ n = mcast_tree.rb_node;
+ while (n) {
+ int ret;
+
+ mcast = rb_entry(n, struct ipath_mcast, rb_node);
+
+ ret = memcmp(mgid->raw, mcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else {
+ atomic_inc(&mcast->refcount);
+ spin_unlock_irqrestore(&mcast_lock, flags);
+ goto bail;
+ }
+ }
+ spin_unlock_irqrestore(&mcast_lock, flags);
+
+ mcast = NULL;
+
+bail:
+ return mcast;
+}
+
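+/*
+ * Illustrative sketch (editor's addition, not part of the driver): a caller
+ * of ipath_mcast_find() must drop the reference it was handed once it is
+ * done with the entry so that ipath_multicast_detach() can make progress.
+ * The unconditional wake_up() is a simplification; the driver proper may
+ * only wake waiters when the count drops far enough.
+ */
+static inline void example_mcast_find_and_put(union ib_gid *mgid)
+{
+ struct ipath_mcast *mcast = ipath_mcast_find(mgid);
+
+ if (!mcast)
+ return;
+ /* ... deliver to the QPs on mcast->qp_list (under RCU) ... */
+ atomic_dec(&mcast->refcount);
+ wake_up(&mcast->wait);
+}
+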
+/**
+ * ipath_mcast_add - insert mcast GID into table and attach QP struct
+ * @mcast: the mcast GID table
+ * @mqp: the QP to attach
+ *
+ * Return zero if both were added. Return EEXIST if the GID was already in
+ * the table but the QP was added. Return ESRCH if the QP was already
+ * attached and neither structure was added.
+ */
+static int ipath_mcast_add(struct ipath_mcast *mcast,
+ struct ipath_mcast_qp *mqp)
+{
+ struct rb_node **n = &mcast_tree.rb_node;
+ struct rb_node *pn = NULL;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mcast_lock, flags);
+
+ while (*n) {
+ struct ipath_mcast *tmcast;
+ struct ipath_mcast_qp *p;
+
+ pn = *n;
+ tmcast = rb_entry(pn, struct ipath_mcast, rb_node);
+
+ ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0) {
+ n = &pn->rb_left;
+ continue;
+ }
+ if (ret > 0) {
+ n = &pn->rb_right;
+ continue;
+ }
+
+ /* Search the QP list to see if this is already there. */
+ list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
+ if (p->qp == mqp->qp) {
+ spin_unlock_irqrestore(&mcast_lock, flags);
+ ret = ESRCH;
+ goto bail;
+ }
+ }
+ list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
+ spin_unlock_irqrestore(&mcast_lock, flags);
+ ret = EEXIST;
+ goto bail;
+ }
+
+ list_add_tail_rcu(&mqp->list, &mcast->qp_list);
+
+ atomic_inc(&mcast->refcount);
+ rb_link_node(&mcast->rb_node, pn, n);
+ rb_insert_color(&mcast->rb_node, &mcast_tree);
+
+ spin_unlock_irqrestore(&mcast_lock, flags);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct ipath_qp *qp = to_iqp(ibqp);
+ struct ipath_mcast *mcast;
+ struct ipath_mcast_qp *mqp;
+ int ret;
+
+ /*
+ * Allocate data structures since it's better to do this outside of
+ * spin locks and they will most likely be needed.
+ */
+ mcast = ipath_mcast_alloc(gid);
+ if (mcast == NULL) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ mqp = ipath_mcast_qp_alloc(qp);
+ if (mqp == NULL) {
+ ipath_mcast_free(mcast);
+ ret = -ENOMEM;
+ goto bail;
+ }
+ switch (ipath_mcast_add(mcast, mqp)) {
+ case ESRCH:
+ /* Neither was used: can't attach the same QP twice. */
+ ipath_mcast_qp_free(mqp);
+ ipath_mcast_free(mcast);
+ ret = -EINVAL;
+ goto bail;
+ case EEXIST: /* The mcast wasn't used */
+ ipath_mcast_free(mcast);
+ break;
+ default:
+ break;
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct ipath_qp *qp = to_iqp(ibqp);
+ struct ipath_mcast *mcast = NULL;
+ struct ipath_mcast_qp *p, *tmp;
+ struct rb_node *n;
+ unsigned long flags;
+ int last = 0;
+ int ret;
+
+ spin_lock_irqsave(&mcast_lock, flags);
+
+ /* Find the GID in the mcast table. */
+ n = mcast_tree.rb_node;
+ while (1) {
+ if (n == NULL) {
+ spin_unlock_irqrestore(&mcast_lock, flags);
+ ret = 0;
+ goto bail;
+ }
+
+ mcast = rb_entry(n, struct ipath_mcast, rb_node);
+ ret = memcmp(gid->raw, mcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else
+ break;
+ }
+
+ /* Search the QP list. */
+ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
+ if (p->qp != qp)
+ continue;
+ /*
+ * We found it, so remove it, but don't poison the forward
+ * link until we are sure there are no list walkers.
+ */
+ list_del_rcu(&p->list);
+
+ /* If this was the last attached QP, remove the GID too. */
+ if (list_empty(&mcast->qp_list)) {
+ rb_erase(&mcast->rb_node, &mcast_tree);
+ last = 1;
+ }
+ break;
+ }
+
+ spin_unlock_irqrestore(&mcast_lock, flags);
+
+ if (p) {
+ /*
+ * Wait for any list walkers to finish before freeing the
+ * list element.
+ */
+ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+ ipath_mcast_qp_free(p);
+ }
+ if (last) {
+ atomic_dec(&mcast->refcount);
+ wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+ ipath_mcast_free(mcast);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int ipath_mcast_tree_empty(void)
+{
+ return mcast_tree.rb_node == NULL;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
new file mode 100644
index 0000000000000..adc5322f15c17
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file is conditionally built on x86_64 only. Otherwise weak symbol
+ * versions of the functions exported from here are used.
+ */
+
+#include <linux/pci.h>
+#include <asm/mtrr.h>
+#include <asm/processor.h>
+
+#include "ipath_kernel.h"
+
+/**
+ * ipath_enable_wc - enable write combining for MMIO writes to the device
+ * @dd: infinipath device
+ *
+ * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
+ * write combining.
+ */
+int ipath_enable_wc(struct ipath_devdata *dd)
+{
+ int ret = 0;
+ u64 pioaddr, piolen;
+ unsigned bits;
+ const unsigned long addr = pci_resource_start(dd->pcidev, 0);
+ const size_t len = pci_resource_len(dd->pcidev, 0);
+
+ /*
+ * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
+ * chip. Linux (possibly the hardware) requires the region to start at
+ * an address that is a multiple of its length, and the length has to be
+ * a power of 2. For rev1 that means the base address; for rev2 it will
+ * be just the PIO buffers themselves.
+ */
+ pioaddr = addr + dd->ipath_piobufbase;
+ piolen = (dd->ipath_piobcnt2k +
+ dd->ipath_piobcnt4k) *
+ ALIGN(dd->ipath_piobcnt2k +
+ dd->ipath_piobcnt4k, dd->ipath_palign);
+
+ for (bits = 0; !(piolen & (1ULL << bits)); bits++)
+ /* do nothing */ ;
+
+ if (piolen != (1ULL << bits)) {
+ piolen >>= bits;
+ while (piolen >>= 1)
+ bits++;
+ piolen = 1ULL << (bits + 1);
+ }
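+ /*
+ * Editor's note (worked example): for a hypothetical piolen of
+ * 0x30000 the loop above finds bits = 16; since 0x30000 is not
+ * 1 << 16, the length is rounded up to the next power of two,
+ * 0x40000, which is what the MTRR interface needs.
+ */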
+ if (pioaddr & (piolen - 1)) {
+ u64 atmp;
+ ipath_dbg("pioaddr %llx not on right boundary for size "
+ "%llx, fixing\n",
+ (unsigned long long) pioaddr,
+ (unsigned long long) piolen);
+ atmp = pioaddr & ~(piolen - 1);
+ if (atmp < addr || (atmp + piolen) > (addr + len)) {
+ ipath_dev_err(dd, "No way to align address/size "
+ "(%llx/%llx), no WC mtrr\n",
+ (unsigned long long) atmp,
+ (unsigned long long) piolen << 1);
+ ret = -ENODEV;
+ } else {
+ ipath_dbg("changing WC base from %llx to %llx, "
+ "len from %llx to %llx\n",
+ (unsigned long long) pioaddr,
+ (unsigned long long) atmp,
+ (unsigned long long) piolen,
+ (unsigned long long) piolen << 1);
+ pioaddr = atmp;
+ piolen <<= 1;
+ }
+ }
+
+ if (!ret) {
+ int cookie;
+ ipath_cdbg(VERBOSE, "Setting mtrr for chip to WC "
+ "(addr %llx, len=0x%llx)\n",
+ (unsigned long long) pioaddr,
+ (unsigned long long) piolen);
+ cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
+ if (cookie < 0) {
+ dev_info(&dd->pcidev->dev,
+ "mtrr_add() WC for PIO bufs "
+ "failed (%d)\n",
+ cookie);
+ ret = -EINVAL;
+ } else {
+ ipath_cdbg(VERBOSE, "Set mtrr for chip to WC, "
+ "cookie is %d\n", cookie);
+ dd->ipath_wc_cookie = cookie;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * ipath_disable_wc - disable write combining for MMIO writes to the device
+ * @dd: infinipath device
+ */
+void ipath_disable_wc(struct ipath_devdata *dd)
+{
+ if (dd->ipath_wc_cookie) {
+ ipath_cdbg(VERBOSE, "undoing WCCOMB on pio buffers\n");
+ mtrr_del(dd->ipath_wc_cookie, 0, 0);
+ dd->ipath_wc_cookie = 0;
+ }
+}
+
+/**
+ * ipath_unordered_wc - indicate whether write combining is ordered
+ *
+ * Because our performance depends on our ability to do write combining mmio
+ * writes in the most efficient way, we need to know if we are on an Intel
+ * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
+ * the order completed, and so no special flushing is required to get
+ * correct ordering. Intel processors, however, will flush write buffers
+ * out in "random" orders, and so explicit ordering is needed at times.
+ */
+int ipath_unordered_wc(void)
+{
+ return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
+}
diff --git a/drivers/infiniband/hw/ipath/ips_common.h b/drivers/infiniband/hw/ipath/ips_common.h
new file mode 100644
index 0000000000000..410a764dfcef9
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ips_common.h
@@ -0,0 +1,263 @@
+#ifndef IPS_COMMON_H
+#define IPS_COMMON_H
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ipath_common.h"
+
+struct ipath_header {
+ /*
+ * Version - 4 bits, Port - 4 bits, TID - 10 bits and Offset -
+ * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
+ * Port 3, TID 11, offset 14.
+ */
+ __le32 ver_port_tid_offset;
+ __le16 chksum;
+ __le16 pkt_flags;
+};
+
+struct ips_message_header {
+ __be16 lrh[4];
+ __be32 bth[3];
+ /* fields below this point are in host byte order */
+ struct ipath_header iph;
+ __u8 sub_opcode;
+ __u8 flags;
+ __u16 src_rank;
+ /* 24 bits. The upper 8 bit is available for other use */
+ union {
+ struct {
+ unsigned ack_seq_num:24;
+ unsigned port:4;
+ unsigned unused:4;
+ };
+ __u32 ack_seq_num_org;
+ };
+ __u8 expected_tid_session_id;
+ __u8 tinylen; /* to aid MPI */
+ union {
+ __u16 tag; /* to aid MPI */
+ __u16 mqhdr; /* for PSM MQ */
+ };
+ union {
+ __u32 mpi[4]; /* to aid MPI */
+ __u32 data[4];
+ __u64 mq[2]; /* for PSM MQ */
+ struct {
+ __u16 mtu;
+ __u8 major_ver;
+ __u8 minor_ver;
+ __u32 not_used; /* free */
+ __u32 run_id;
+ __u32 client_ver;
+ };
+ };
+};
+
+struct ether_header {
+ __be16 lrh[4];
+ __be32 bth[3];
+ struct ipath_header iph;
+ __u8 sub_opcode;
+ __u8 cmd;
+ __be16 lid;
+ __u16 mac[3];
+ __u8 frag_num;
+ __u8 seq_num;
+ __le32 len;
+ /* MUST be of word size due to PIO write requirements */
+ __u32 csum;
+ __le16 csum_offset;
+ __le16 flags;
+ __u16 first_2_bytes;
+ __u8 unused[2]; /* currently unused */
+};
+
+/*
+ * The PIO buffer used for sending infinipath messages must only be written
+ * in 32-bit words, all the data must be written, and no writes can occur
+ * after the last word is written (which transfers "ownership" of the buffer
+ * to the chip and triggers the message to be sent).
+ * Since the Linux sk_buff structure can be recursive, non-aligned, and
+ * any number of bytes in each segment, we use the following structure
+ * to keep information about the overall state of the copy operation.
+ * This is used to save the information needed to store the checksum
+ * in the right place before sending the last word to the hardware and
+ * to buffer the last 0-3 bytes of non-word sized segments.
+ */
+struct copy_data_s {
+ struct ether_header *hdr;
+ /* addr of PIO buf to write csum to */
+ __u32 __iomem *csum_pio;
+ __u32 __iomem *to; /* addr of PIO buf to write data to */
+ __u32 device; /* which device to allocate PIO bufs from */
+ __s32 error; /* set if there is an error. */
+ __s32 extra; /* amount of data saved in u.buf below */
+ __u32 len; /* total length to send in bytes */
+ __u32 flen; /* fragment length in words */
+ __u32 csum; /* partial IP checksum */
+ __u32 pos; /* position for partial checksum */
+ __u32 offset; /* offset to where data currently starts */
+ __s32 checksum_calc; /* set to 1 when csum has been calculated */
+ struct sk_buff *skb;
+ union {
+ __u32 w;
+ __u8 buf[4];
+ } u;
+};
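+
+/*
+ * Illustrative sketch (editor's addition, not part of the driver): how the
+ * "extra" byte buffer above might be used to turn byte-granular segment
+ * data into the whole-word PIO writes described in the comment. Checksum
+ * updates and error handling are omitted, the field usage is an assumption
+ * based on the comments in struct copy_data_s, and __raw_writel() is
+ * assumed to be available via asm/io.h.
+ */
+static inline void example_emit_bytes(struct copy_data_s *cd,
+ const __u8 *data, __u32 len)
+{
+ while (len--) {
+ cd->u.buf[cd->extra++] = *data++;
+ if (cd->extra == 4) {
+ __raw_writel(cd->u.w, cd->to++);
+ cd->extra = 0;
+ }
+ }
+}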
+
+/* IB - LRH header consts */
+#define IPS_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
+#define IPS_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
+
+#define IPS_OFFSET 0
+
+/*
+ * defines the cut-off point between the header queue and eager/expected
+ * TID queue
+ */
+#define NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE \
+ ((sizeof(struct ips_message_header) - \
+ offsetof(struct ips_message_header, iph)) >> 2)
+
+/* OpCodes */
+#define OPCODE_IPS 0xC0
+#define OPCODE_ITH4X 0xC1
+
+/* OpCode 30 is used by stand-alone test programs */
+#define OPCODE_RAW_DATA 0xDE
+/* last OpCode (31) is reserved for test */
+#define OPCODE_TEST 0xDF
+
+/* sub OpCodes - ips */
+#define OPCODE_SEQ_DATA 0x01
+#define OPCODE_SEQ_CTRL 0x02
+
+#define OPCODE_SEQ_MQ_DATA 0x03
+#define OPCODE_SEQ_MQ_CTRL 0x04
+
+#define OPCODE_ACK 0x10
+#define OPCODE_NAK 0x11
+
+#define OPCODE_ERR_CHK 0x20
+#define OPCODE_ERR_CHK_PLS 0x21
+
+#define OPCODE_STARTUP 0x30
+#define OPCODE_STARTUP_ACK 0x31
+#define OPCODE_STARTUP_NAK 0x32
+
+#define OPCODE_STARTUP_EXT 0x34
+#define OPCODE_STARTUP_ACK_EXT 0x35
+#define OPCODE_STARTUP_NAK_EXT 0x36
+
+#define OPCODE_TIDS_RELEASE 0x40
+#define OPCODE_TIDS_RELEASE_CONFIRM 0x41
+
+#define OPCODE_CLOSE 0x50
+#define OPCODE_CLOSE_ACK 0x51
+/*
+ * like OPCODE_CLOSE, but no complaint if other side has already closed.
+ * Used when doing abort(), MPI_Abort(), etc.
+ */
+#define OPCODE_ABORT 0x52
+
+/* sub OpCodes - ith4x */
+#define OPCODE_ENCAP 0x81
+#define OPCODE_LID_ARP 0x82
+
+/* Receive Header Queue: receive type (from infinipath) */
+#define RCVHQ_RCV_TYPE_EXPECTED 0
+#define RCVHQ_RCV_TYPE_EAGER 1
+#define RCVHQ_RCV_TYPE_NON_KD 2
+#define RCVHQ_RCV_TYPE_ERROR 3
+
+/* misc. */
+#define SIZE_OF_CRC 1
+
+#define EAGER_TID_ID INFINIPATH_I_TID_MASK
+
+#define IPS_DEFAULT_P_KEY 0xFFFF
+
+#define IPS_PERMISSIVE_LID 0xFFFF
+#define IPS_MULTICAST_LID_BASE 0xC000
+
+#define IPS_AETH_CREDIT_SHIFT 24
+#define IPS_AETH_CREDIT_MASK 0x1F
+#define IPS_AETH_CREDIT_INVAL 0x1F
+
+#define IPS_PSN_MASK 0xFFFFFF
+#define IPS_MSN_MASK 0xFFFFFF
+#define IPS_QPN_MASK 0xFFFFFF
+#define IPS_MULTICAST_QPN 0xFFFFFF
+
+/* functions for extracting fields from rcvhdrq entries */
+static inline __u32 ips_get_hdr_err_flags(const __le32 * rbuf)
+{
+ return __le32_to_cpu(rbuf[1]);
+}
+
+static inline __u32 ips_get_index(const __le32 * rbuf)
+{
+ return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_EGRINDEX_SHIFT)
+ & INFINIPATH_RHF_EGRINDEX_MASK;
+}
+
+static inline __u32 ips_get_rcv_type(const __le32 * rbuf)
+{
+ return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_RCVTYPE_SHIFT)
+ & INFINIPATH_RHF_RCVTYPE_MASK;
+}
+
+static inline __u32 ips_get_length_in_bytes(const __le32 * rbuf)
+{
+ return ((__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_LENGTH_SHIFT)
+ & INFINIPATH_RHF_LENGTH_MASK) << 2;
+}
+
+static inline void *ips_get_first_protocol_header(const __u32 * rbuf)
+{
+ return (void *)&rbuf[2];
+}
+
+static inline struct ips_message_header *ips_get_ips_header(const __u32 *
+ rbuf)
+{
+ return (struct ips_message_header *)&rbuf[2];
+}
+
+static inline __u32 ips_get_ipath_ver(__le32 hdrword)
+{
+ return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
+ & INFINIPATH_I_VERS_MASK;
+}
+
+#endif /* IPS_COMMON_H */
diff --git a/drivers/infiniband/hw/ipath/verbs_debug.h b/drivers/infiniband/hw/ipath/verbs_debug.h
new file mode 100644
index 0000000000000..40d693cf3f94c
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/verbs_debug.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VERBS_DEBUG_H
+#define _VERBS_DEBUG_H
+
+/*
+ * This file contains tracing code for the ib_ipath kernel module.
+ */
+#ifndef _VERBS_DEBUGGING /* tracing enabled or not */
+#define _VERBS_DEBUGGING 1
+#endif
+
+extern unsigned ib_ipath_debug;
+
+#define _VERBS_ERROR(fmt,...) \
+ do { \
+ printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \
+ } while(0)
+
+#define _VERBS_UNIT_ERROR(unit,fmt,...) \
+ do { \
+ printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \
+ } while(0)
+
+#if _VERBS_DEBUGGING
+
+/*
+ * Mask values for debugging. The scheme allows us to compile out any
+ * of the debug tracing stuff, and if compiled in, to enable or
+ * disable dynamically.
+ * This can be set at modprobe time also:
+ * modprobe ib_ipath ib_ipath_debug=3
+ */
+
+#define __VERBS_INFO 0x1 /* generic low verbosity stuff */
+#define __VERBS_DBG 0x2 /* generic debug */
+#define __VERBS_VDBG 0x4 /* verbose debug */
+#define __VERBS_SMADBG 0x8000 /* sma packet debug */
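+
+/*
+ * Editor's note: the mask bits combine; for example a hypothetical
+ * modprobe ib_ipath ib_ipath_debug=0x6
+ * enables both __VERBS_DBG and __VERBS_VDBG (0x2 | 0x4).
+ */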
+
+#define _VERBS_INFO(fmt,...) \
+ do { \
+ if (unlikely(ib_ipath_debug&__VERBS_INFO)) \
+ printk(KERN_INFO "%s: " fmt,"ib_ipath", \
+ ##__VA_ARGS__); \
+ } while(0)
+
+#define _VERBS_DBG(fmt,...) \
+ do { \
+ if (unlikely(ib_ipath_debug&__VERBS_DBG)) \
+ printk(KERN_DEBUG "%s: " fmt, __func__, \
+ ##__VA_ARGS__); \
+ } while(0)
+
+#define _VERBS_VDBG(fmt,...) \
+ do { \
+ if (unlikely(ib_ipath_debug&__VERBS_VDBG)) \
+ printk(KERN_DEBUG "%s: " fmt, __func__, \
+ ##__VA_ARGS__); \
+ } while(0)
+
+#define _VERBS_SMADBG(fmt,...) \
+ do { \
+ if (unlikely(ib_ipath_debug&__VERBS_SMADBG)) \
+ printk(KERN_DEBUG "%s: " fmt, __func__, \
+ ##__VA_ARGS__); \
+ } while(0)
+
+#else /* ! _VERBS_DEBUGGING */
+
+#define _VERBS_INFO(fmt,...)
+#define _VERBS_DBG(fmt,...)
+#define _VERBS_VDBG(fmt,...)
+#define _VERBS_SMADBG(fmt,...)
+
+#endif /* _VERBS_DEBUGGING */
+
+#endif /* _VERBS_DEBUG_H */
diff --git a/drivers/input/evbug.c b/drivers/input/evbug.c
index d7828936fd8f5..07358fb51b82a 100644
--- a/drivers/input/evbug.c
+++ b/drivers/input/evbug.c
@@ -49,9 +49,8 @@ static struct input_handle *evbug_connect(struct input_handler *handler, struct
{
struct input_handle *handle;
- if (!(handle = kmalloc(sizeof(struct input_handle), GFP_KERNEL)))
+ if (!(handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL)))
return NULL;
- memset(handle, 0, sizeof(struct input_handle));
handle->dev = dev;
handle->handler = handler;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 745979f33dc2e..a34e3d91d9ed9 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -130,9 +130,8 @@ static int evdev_open(struct inode * inode, struct file * file)
if ((accept_err = input_accept_process(&(evdev_table[i]->handle), file)))
return accept_err;
- if (!(list = kmalloc(sizeof(struct evdev_list), GFP_KERNEL)))
+ if (!(list = kzalloc(sizeof(struct evdev_list), GFP_KERNEL)))
return -ENOMEM;
- memset(list, 0, sizeof(struct evdev_list));
list->evdev = evdev_table[i];
list_add_tail(&list->node, &evdev_table[i]->list);
@@ -609,9 +608,8 @@ static struct input_handle *evdev_connect(struct input_handler *handler, struct
return NULL;
}
- if (!(evdev = kmalloc(sizeof(struct evdev), GFP_KERNEL)))
+ if (!(evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL)))
return NULL;
- memset(evdev, 0, sizeof(struct evdev));
INIT_LIST_HEAD(&evdev->list);
init_waitqueue_head(&evdev->wait);
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index b765a155c0088..36644bff379d5 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched.h> /* HZ */
+#include <linux/mutex.h>
/*#include <asm/io.h>*/
@@ -43,10 +44,10 @@ EXPORT_SYMBOL(gameport_start_polling);
EXPORT_SYMBOL(gameport_stop_polling);
/*
- * gameport_sem protects entire gameport subsystem and is taken
+ * gameport_mutex protects entire gameport subsystem and is taken
* every time gameport port or driver registrered or unregistered.
*/
-static DECLARE_MUTEX(gameport_sem);
+static DEFINE_MUTEX(gameport_mutex);
static LIST_HEAD(gameport_list);
@@ -265,6 +266,7 @@ static void gameport_queue_event(void *object, struct module *owner,
if ((event = kmalloc(sizeof(struct gameport_event), GFP_ATOMIC))) {
if (!try_module_get(owner)) {
printk(KERN_WARNING "gameport: Can't get module reference, dropping event %d\n", event_type);
+ kfree(event);
goto out;
}
@@ -342,7 +344,7 @@ static void gameport_handle_event(void)
struct gameport_event *event;
struct gameport_driver *gameport_drv;
- down(&gameport_sem);
+ mutex_lock(&gameport_mutex);
/*
* Note that we handle only one event here to give swsusp
@@ -379,7 +381,7 @@ static void gameport_handle_event(void)
gameport_free_event(event);
}
- up(&gameport_sem);
+ mutex_unlock(&gameport_mutex);
}
/*
@@ -464,7 +466,7 @@ static ssize_t gameport_rebind_driver(struct device *dev, struct device_attribut
struct device_driver *drv;
int retval;
- retval = down_interruptible(&gameport_sem);
+ retval = mutex_lock_interruptible(&gameport_mutex);
if (retval)
return retval;
@@ -484,7 +486,7 @@ static ssize_t gameport_rebind_driver(struct device *dev, struct device_attribut
retval = -EINVAL;
}
- up(&gameport_sem);
+ mutex_unlock(&gameport_mutex);
return retval;
}
@@ -521,7 +523,7 @@ static void gameport_init_port(struct gameport *gameport)
__module_get(THIS_MODULE);
- init_MUTEX(&gameport->drv_sem);
+ mutex_init(&gameport->drv_mutex);
device_initialize(&gameport->dev);
snprintf(gameport->dev.bus_id, sizeof(gameport->dev.bus_id),
"gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
@@ -661,10 +663,10 @@ void __gameport_register_port(struct gameport *gameport, struct module *owner)
*/
void gameport_unregister_port(struct gameport *gameport)
{
- down(&gameport_sem);
+ mutex_lock(&gameport_mutex);
gameport_disconnect_port(gameport);
gameport_destroy_port(gameport);
- up(&gameport_sem);
+ mutex_unlock(&gameport_mutex);
}
@@ -717,7 +719,7 @@ void gameport_unregister_driver(struct gameport_driver *drv)
{
struct gameport *gameport;
- down(&gameport_sem);
+ mutex_lock(&gameport_mutex);
drv->ignore = 1; /* so gameport_find_driver ignores it */
start_over:
@@ -731,7 +733,7 @@ start_over:
}
driver_unregister(&drv->driver);
- up(&gameport_sem);
+ mutex_unlock(&gameport_mutex);
}
static int gameport_bus_match(struct device *dev, struct device_driver *drv)
@@ -743,9 +745,9 @@ static int gameport_bus_match(struct device *dev, struct device_driver *drv)
static void gameport_set_drv(struct gameport *gameport, struct gameport_driver *drv)
{
- down(&gameport->drv_sem);
+ mutex_lock(&gameport->drv_mutex);
gameport->drv = drv;
- up(&gameport->drv_sem);
+ mutex_unlock(&gameport->drv_mutex);
}
int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode)
@@ -796,5 +798,5 @@ static void __exit gameport_exit(void)
kthread_stop(gameport_task);
}
-module_init(gameport_init);
+subsys_initcall(gameport_init);
module_exit(gameport_exit);
diff --git a/drivers/input/gameport/ns558.c b/drivers/input/gameport/ns558.c
index d2e55dc956baf..3e2d28f263e9a 100644
--- a/drivers/input/gameport/ns558.c
+++ b/drivers/input/gameport/ns558.c
@@ -252,14 +252,14 @@ static struct pnp_driver ns558_pnp_driver;
#endif
-static int pnp_registered = 0;
-
static int __init ns558_init(void)
{
int i = 0;
+ int error;
- if (pnp_register_driver(&ns558_pnp_driver) >= 0)
- pnp_registered = 1;
+ error = pnp_register_driver(&ns558_pnp_driver);
+ if (error && error != -ENODEV) /* should be ENOSYS really */
+ return error;
/*
* Probe ISA ports after PnP, so that PnP ports that are already
@@ -270,7 +270,7 @@ static int __init ns558_init(void)
while (ns558_isa_portlist[i])
ns558_isa_probe(ns558_isa_portlist[i++]);
- return (list_empty(&ns558_list) && !pnp_registered) ? -ENODEV : 0;
+ return list_empty(&ns558_list) && error ? -ENODEV : 0;
}
static void __exit ns558_exit(void)
@@ -283,8 +283,7 @@ static void __exit ns558_exit(void)
kfree(ns558);
}
- if (pnp_registered)
- pnp_unregister_driver(&ns558_pnp_driver);
+ pnp_unregister_driver(&ns558_pnp_driver);
}
module_init(ns558_init);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index f8af0945964ec..a935abeffffc4 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -18,9 +18,11 @@
#include <linux/random.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/device.h>
+#include <linux/mutex.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION("Input core");
@@ -224,7 +226,7 @@ int input_open_device(struct input_handle *handle)
struct input_dev *dev = handle->dev;
int err;
- err = down_interruptible(&dev->sem);
+ err = mutex_lock_interruptible(&dev->mutex);
if (err)
return err;
@@ -236,7 +238,7 @@ int input_open_device(struct input_handle *handle)
if (err)
handle->open--;
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
return err;
}
@@ -255,13 +257,13 @@ void input_close_device(struct input_handle *handle)
input_release_device(handle);
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
if (!--dev->users && dev->close)
dev->close(dev);
handle->open--;
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
}
static void input_link_handle(struct input_handle *handle)
@@ -315,21 +317,6 @@ static struct input_device_id *input_match_device(struct input_device_id *id, st
return NULL;
}
-static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, int max)
-{
- int i;
- int len = 0;
-
- for (i = NBITS(max) - 1; i > 0; i--)
- if (bitmap[i])
- break;
-
- for (; i >= 0; i--)
- len += snprintf(buf + len, max(buf_size - len, 0),
- "%lx%s", bitmap[i], i > 0 ? " " : "");
- return len;
-}
-
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_bus_input_dir;
@@ -342,7 +329,7 @@ static inline void input_wakeup_procfs_readers(void)
wake_up(&input_devices_poll_wait);
}
-static unsigned int input_devices_poll(struct file *file, poll_table *wait)
+static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
{
int state = input_devices_state;
poll_wait(file, &input_devices_poll_wait, wait);
@@ -351,115 +338,171 @@ static unsigned int input_devices_poll(struct file *file, poll_table *wait)
return 0;
}
-#define SPRINTF_BIT(ev, bm) \
- do { \
- len += sprintf(buf + len, "B: %s=", #ev); \
- len += input_print_bitmap(buf + len, INT_MAX, \
- dev->bm##bit, ev##_MAX); \
- len += sprintf(buf + len, "\n"); \
- } while (0)
+static struct list_head *list_get_nth_element(struct list_head *list, loff_t *pos)
+{
+ struct list_head *node;
+ loff_t i = 0;
-#define TEST_AND_SPRINTF_BIT(ev, bm) \
- do { \
- if (test_bit(EV_##ev, dev->evbit)) \
- SPRINTF_BIT(ev, bm); \
- } while (0)
+ list_for_each(node, list)
+ if (i++ == *pos)
+ return node;
+
+ return NULL;
+}
-static int input_devices_read(char *buf, char **start, off_t pos, int count, int *eof, void *data)
+static struct list_head *list_get_next_element(struct list_head *list, struct list_head *element, loff_t *pos)
{
- struct input_dev *dev;
- struct input_handle *handle;
- const char *path;
+ if (element->next == list)
+ return NULL;
+
+ ++(*pos);
+ return element->next;
+}
+
+static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ /* acquire lock here ... Yes, we do need locking, I know, I know... */
+
+ return list_get_nth_element(&input_dev_list, pos);
+}
- off_t at = 0;
- int len, cnt = 0;
+static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return list_get_next_element(&input_dev_list, v, pos);
+}
- list_for_each_entry(dev, &input_dev_list, node) {
+static void input_devices_seq_stop(struct seq_file *seq, void *v)
+{
+ /* release lock here */
+}
- path = kobject_get_path(&dev->cdev.kobj, GFP_KERNEL);
+static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
+ unsigned long *bitmap, int max)
+{
+ int i;
- len = sprintf(buf, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
- dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);
+ for (i = NBITS(max) - 1; i > 0; i--)
+ if (bitmap[i])
+ break;
- len += sprintf(buf + len, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
- len += sprintf(buf + len, "P: Phys=%s\n", dev->phys ? dev->phys : "");
- len += sprintf(buf + len, "S: Sysfs=%s\n", path ? path : "");
- len += sprintf(buf + len, "H: Handlers=");
+ seq_printf(seq, "B: %s=", name);
+ for (; i >= 0; i--)
+ seq_printf(seq, "%lx%s", bitmap[i], i > 0 ? " " : "");
+ seq_putc(seq, '\n');
+}
- list_for_each_entry(handle, &dev->h_list, d_node)
- len += sprintf(buf + len, "%s ", handle->name);
-
- len += sprintf(buf + len, "\n");
-
- SPRINTF_BIT(EV, ev);
- TEST_AND_SPRINTF_BIT(KEY, key);
- TEST_AND_SPRINTF_BIT(REL, rel);
- TEST_AND_SPRINTF_BIT(ABS, abs);
- TEST_AND_SPRINTF_BIT(MSC, msc);
- TEST_AND_SPRINTF_BIT(LED, led);
- TEST_AND_SPRINTF_BIT(SND, snd);
- TEST_AND_SPRINTF_BIT(FF, ff);
- TEST_AND_SPRINTF_BIT(SW, sw);
-
- len += sprintf(buf + len, "\n");
-
- at += len;
-
- if (at >= pos) {
- if (!*start) {
- *start = buf + (pos - (at - len));
- cnt = at - pos;
- } else cnt += len;
- buf += len;
- if (cnt >= count)
- break;
- }
+static int input_devices_seq_show(struct seq_file *seq, void *v)
+{
+ struct input_dev *dev = container_of(v, struct input_dev, node);
+ const char *path = kobject_get_path(&dev->cdev.kobj, GFP_KERNEL);
+ struct input_handle *handle;
- kfree(path);
- }
+ seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
+ dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);
- if (&dev->node == &input_dev_list)
- *eof = 1;
+ seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
+ seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
+ seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
+ seq_printf(seq, "H: Handlers=");
- return (count > cnt) ? cnt : count;
+ list_for_each_entry(handle, &dev->h_list, d_node)
+ seq_printf(seq, "%s ", handle->name);
+ seq_putc(seq, '\n');
+
+ input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
+ if (test_bit(EV_KEY, dev->evbit))
+ input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
+ if (test_bit(EV_REL, dev->evbit))
+ input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
+ if (test_bit(EV_ABS, dev->evbit))
+ input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
+ if (test_bit(EV_MSC, dev->evbit))
+ input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
+ if (test_bit(EV_LED, dev->evbit))
+ input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
+ if (test_bit(EV_SND, dev->evbit))
+ input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
+ if (test_bit(EV_FF, dev->evbit))
+ input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
+ if (test_bit(EV_SW, dev->evbit))
+ input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);
+
+ seq_putc(seq, '\n');
+
+ kfree(path);
+ return 0;
}
-static int input_handlers_read(char *buf, char **start, off_t pos, int count, int *eof, void *data)
+static struct seq_operations input_devices_seq_ops = {
+ .start = input_devices_seq_start,
+ .next = input_devices_seq_next,
+ .stop = input_devices_seq_stop,
+ .show = input_devices_seq_show,
+};
+
+static int input_proc_devices_open(struct inode *inode, struct file *file)
{
- struct input_handler *handler;
+ return seq_open(file, &input_devices_seq_ops);
+}
- off_t at = 0;
- int len = 0, cnt = 0;
- int i = 0;
+static struct file_operations input_devices_fileops = {
+ .owner = THIS_MODULE,
+ .open = input_proc_devices_open,
+ .poll = input_proc_devices_poll,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
- list_for_each_entry(handler, &input_handler_list, node) {
+static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ /* acquire lock here ... Yes, we do need locking, I know, I know... */
+ seq->private = (void *)(unsigned long)*pos;
+ return list_get_nth_element(&input_handler_list, pos);
+}
+
+static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ seq->private = (void *)(unsigned long)(*pos + 1);
+ return list_get_next_element(&input_handler_list, v, pos);
+}
- if (handler->fops)
- len = sprintf(buf, "N: Number=%d Name=%s Minor=%d\n",
- i++, handler->name, handler->minor);
- else
- len = sprintf(buf, "N: Number=%d Name=%s\n",
- i++, handler->name);
+static void input_handlers_seq_stop(struct seq_file *seq, void *v)
+{
+ /* release lock here */
+}
- at += len;
+static int input_handlers_seq_show(struct seq_file *seq, void *v)
+{
+ struct input_handler *handler = container_of(v, struct input_handler, node);
- if (at >= pos) {
- if (!*start) {
- *start = buf + (pos - (at - len));
- cnt = at - pos;
- } else cnt += len;
- buf += len;
- if (cnt >= count)
- break;
- }
- }
- if (&handler->node == &input_handler_list)
- *eof = 1;
+ seq_printf(seq, "N: Number=%ld Name=%s",
+ (unsigned long)seq->private, handler->name);
+ if (handler->fops)
+ seq_printf(seq, " Minor=%d", handler->minor);
+ seq_putc(seq, '\n');
- return (count > cnt) ? cnt : count;
+ return 0;
}
+static struct seq_operations input_handlers_seq_ops = {
+ .start = input_handlers_seq_start,
+ .next = input_handlers_seq_next,
+ .stop = input_handlers_seq_stop,
+ .show = input_handlers_seq_show,
+};
-static struct file_operations input_fileops;
+static int input_proc_handlers_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &input_handlers_seq_ops);
+}
+
+static struct file_operations input_handlers_fileops = {
+ .owner = THIS_MODULE,
+ .open = input_proc_handlers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
static int __init input_proc_init(void)
{
@@ -471,20 +514,19 @@ static int __init input_proc_init(void)
proc_bus_input_dir->owner = THIS_MODULE;
- entry = create_proc_read_entry("devices", 0, proc_bus_input_dir, input_devices_read, NULL);
+ entry = create_proc_entry("devices", 0, proc_bus_input_dir);
if (!entry)
goto fail1;
entry->owner = THIS_MODULE;
- input_fileops = *entry->proc_fops;
- input_fileops.poll = input_devices_poll;
- entry->proc_fops = &input_fileops;
+ entry->proc_fops = &input_devices_fileops;
- entry = create_proc_read_entry("handlers", 0, proc_bus_input_dir, input_handlers_read, NULL);
+ entry = create_proc_entry("handlers", 0, proc_bus_input_dir);
if (!entry)
goto fail2;
entry->owner = THIS_MODULE;
+ entry->proc_fops = &input_handlers_fileops;
return 0;
@@ -512,13 +554,14 @@ static ssize_t input_dev_show_##name(struct class_device *dev, char *buf) \
struct input_dev *input_dev = to_input_dev(dev); \
int retval; \
\
- retval = down_interruptible(&input_dev->sem); \
+ retval = mutex_lock_interruptible(&input_dev->mutex); \
if (retval) \
return retval; \
\
- retval = sprintf(buf, "%s\n", input_dev->name ? input_dev->name : ""); \
+ retval = scnprintf(buf, PAGE_SIZE, \
+ "%s\n", input_dev->name ? input_dev->name : ""); \
\
- up(&input_dev->sem); \
+ mutex_unlock(&input_dev->mutex); \
\
return retval; \
} \
@@ -528,46 +571,51 @@ INPUT_DEV_STRING_ATTR_SHOW(name);
INPUT_DEV_STRING_ATTR_SHOW(phys);
INPUT_DEV_STRING_ATTR_SHOW(uniq);
-static int print_modalias_bits(char *buf, int size, char prefix, unsigned long *arr,
- unsigned int min, unsigned int max)
+static int input_print_modalias_bits(char *buf, int size,
+ char name, unsigned long *bm,
+ unsigned int min_bit, unsigned int max_bit)
{
- int len, i;
+ int len = 0, i;
- len = snprintf(buf, size, "%c", prefix);
- for (i = min; i < max; i++)
- if (arr[LONG(i)] & BIT(i))
- len += snprintf(buf + len, size - len, "%X,", i);
+ len += snprintf(buf, max(size, 0), "%c", name);
+ for (i = min_bit; i < max_bit; i++)
+ if (bm[LONG(i)] & BIT(i))
+ len += snprintf(buf + len, max(size - len, 0), "%X,", i);
return len;
}
-static int print_modalias(char *buf, int size, struct input_dev *id)
+static int input_print_modalias(char *buf, int size, struct input_dev *id,
+ int add_cr)
{
int len;
- len = snprintf(buf, size, "input:b%04Xv%04Xp%04Xe%04X-",
- id->id.bustype,
- id->id.vendor,
- id->id.product,
- id->id.version);
-
- len += print_modalias_bits(buf + len, size - len, 'e', id->evbit,
- 0, EV_MAX);
- len += print_modalias_bits(buf + len, size - len, 'k', id->keybit,
- KEY_MIN_INTERESTING, KEY_MAX);
- len += print_modalias_bits(buf + len, size - len, 'r', id->relbit,
- 0, REL_MAX);
- len += print_modalias_bits(buf + len, size - len, 'a', id->absbit,
- 0, ABS_MAX);
- len += print_modalias_bits(buf + len, size - len, 'm', id->mscbit,
- 0, MSC_MAX);
- len += print_modalias_bits(buf + len, size - len, 'l', id->ledbit,
- 0, LED_MAX);
- len += print_modalias_bits(buf + len, size - len, 's', id->sndbit,
- 0, SND_MAX);
- len += print_modalias_bits(buf + len, size - len, 'f', id->ffbit,
- 0, FF_MAX);
- len += print_modalias_bits(buf + len, size - len, 'w', id->swbit,
- 0, SW_MAX);
+ len = snprintf(buf, max(size, 0),
+ "input:b%04Xv%04Xp%04Xe%04X-",
+ id->id.bustype, id->id.vendor,
+ id->id.product, id->id.version);
+
+ len += input_print_modalias_bits(buf + len, size - len,
+ 'e', id->evbit, 0, EV_MAX);
+ len += input_print_modalias_bits(buf + len, size - len,
+ 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
+ len += input_print_modalias_bits(buf + len, size - len,
+ 'r', id->relbit, 0, REL_MAX);
+ len += input_print_modalias_bits(buf + len, size - len,
+ 'a', id->absbit, 0, ABS_MAX);
+ len += input_print_modalias_bits(buf + len, size - len,
+ 'm', id->mscbit, 0, MSC_MAX);
+ len += input_print_modalias_bits(buf + len, size - len,
+ 'l', id->ledbit, 0, LED_MAX);
+ len += input_print_modalias_bits(buf + len, size - len,
+ 's', id->sndbit, 0, SND_MAX);
+ len += input_print_modalias_bits(buf + len, size - len,
+ 'f', id->ffbit, 0, FF_MAX);
+ len += input_print_modalias_bits(buf + len, size - len,
+ 'w', id->swbit, 0, SW_MAX);
+
+ if (add_cr)
+ len += snprintf(buf + len, max(size - len, 0), "\n");
+
return len;
}
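
(A note on the max(size, 0) and max(size - len, 0) clamps introduced above: snprintf() returns the length the output would have had, not how many bytes were actually stored, so once the buffer is exhausted size - len goes negative and, passed on unclamped, would be converted to an enormous size_t. The small userspace program below, with hypothetical buffer sizes and nothing taken from the driver, demonstrates the behaviour the clamps defend against.)

#include <stdio.h>

int main(void)
{
	char buf[8];
	int size = sizeof(buf);
	int len;

	/* snprintf() reports the full 12-character length even though only
	 * 7 characters plus the terminating NUL actually fit into buf. */
	len = snprintf(buf, size, "%s", "hello world!");
	printf("stored \"%s\", reported length %d\n", buf, len);

	/* size - len is now negative; interpreted as a size_t by a further
	 * snprintf() call it would look like a huge buffer. */
	printf("size - len = %d\n", size - len);
	return 0;
}

The same reasoning is why the sysfs show routines above cap their return value with min_t(int, len, PAGE_SIZE) or switch to scnprintf(), which returns the number of bytes actually written.
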
@@ -576,9 +624,9 @@ static ssize_t input_dev_show_modalias(struct class_device *dev, char *buf)
struct input_dev *id = to_input_dev(dev);
ssize_t len;
- len = print_modalias(buf, PAGE_SIZE, id);
- len += snprintf(buf + len, PAGE_SIZE-len, "\n");
- return len;
+ len = input_print_modalias(buf, PAGE_SIZE, id, 1);
+
+ return min_t(int, len, PAGE_SIZE);
}
static CLASS_DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
@@ -598,7 +646,7 @@ static struct attribute_group input_dev_attr_group = {
static ssize_t input_dev_show_id_##name(struct class_device *dev, char *buf) \
{ \
struct input_dev *input_dev = to_input_dev(dev); \
- return sprintf(buf, "%04x\n", input_dev->id.name); \
+ return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \
} \
static CLASS_DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL);
@@ -620,11 +668,33 @@ static struct attribute_group input_dev_id_attr_group = {
.attrs = input_dev_id_attrs,
};
+static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
+ int max, int add_cr)
+{
+ int i;
+ int len = 0;
+
+ for (i = NBITS(max) - 1; i > 0; i--)
+ if (bitmap[i])
+ break;
+
+ for (; i >= 0; i--)
+ len += snprintf(buf + len, max(buf_size - len, 0),
+ "%lx%s", bitmap[i], i > 0 ? " " : "");
+
+ if (add_cr)
+ len += snprintf(buf + len, max(buf_size - len, 0), "\n");
+
+ return len;
+}
+
#define INPUT_DEV_CAP_ATTR(ev, bm) \
static ssize_t input_dev_show_cap_##bm(struct class_device *dev, char *buf) \
{ \
struct input_dev *input_dev = to_input_dev(dev); \
- return input_print_bitmap(buf, PAGE_SIZE, input_dev->bm##bit, ev##_MAX);\
+ int len = input_print_bitmap(buf, PAGE_SIZE, \
+ input_dev->bm##bit, ev##_MAX, 1); \
+ return min_t(int, len, PAGE_SIZE); \
} \
static CLASS_DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL);
@@ -669,8 +739,8 @@ static void input_dev_release(struct class_device *class_dev)
* device bitfields.
*/
static int input_add_uevent_bm_var(char **envp, int num_envp, int *cur_index,
- char *buffer, int buffer_size, int *cur_len,
- const char *name, unsigned long *bitmap, int max)
+ char *buffer, int buffer_size, int *cur_len,
+ const char *name, unsigned long *bitmap, int max)
{
if (*cur_index >= num_envp - 1)
return -ENOMEM;
@@ -678,12 +748,36 @@ static int input_add_uevent_bm_var(char **envp, int num_envp, int *cur_index,
envp[*cur_index] = buffer + *cur_len;
*cur_len += snprintf(buffer + *cur_len, max(buffer_size - *cur_len, 0), name);
- if (*cur_len > buffer_size)
+ if (*cur_len >= buffer_size)
return -ENOMEM;
*cur_len += input_print_bitmap(buffer + *cur_len,
max(buffer_size - *cur_len, 0),
- bitmap, max) + 1;
+ bitmap, max, 0) + 1;
+ if (*cur_len > buffer_size)
+ return -ENOMEM;
+
+ (*cur_index)++;
+ return 0;
+}
+
+static int input_add_uevent_modalias_var(char **envp, int num_envp, int *cur_index,
+ char *buffer, int buffer_size, int *cur_len,
+ struct input_dev *dev)
+{
+ if (*cur_index >= num_envp - 1)
+ return -ENOMEM;
+
+ envp[*cur_index] = buffer + *cur_len;
+
+ *cur_len += snprintf(buffer + *cur_len, max(buffer_size - *cur_len, 0),
+ "MODALIAS=");
+ if (*cur_len >= buffer_size)
+ return -ENOMEM;
+
+ *cur_len += input_print_modalias(buffer + *cur_len,
+ max(buffer_size - *cur_len, 0),
+ dev, 0) + 1;
if (*cur_len > buffer_size)
return -ENOMEM;
@@ -693,7 +787,7 @@ static int input_add_uevent_bm_var(char **envp, int num_envp, int *cur_index,
#define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \
do { \
- int err = add_uevent_var(envp, num_envp, &i, \
+ int err = add_uevent_var(envp, num_envp, &i, \
buffer, buffer_size, &len, \
fmt, val); \
if (err) \
@@ -709,6 +803,16 @@ static int input_add_uevent_bm_var(char **envp, int num_envp, int *cur_index,
return err; \
} while (0)
+#define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \
+ do { \
+ int err = input_add_uevent_modalias_var(envp, \
+ num_envp, &i, \
+ buffer, buffer_size, &len, \
+ dev); \
+ if (err) \
+ return err; \
+ } while (0)
+
static int input_dev_uevent(struct class_device *cdev, char **envp,
int num_envp, char *buffer, int buffer_size)
{
@@ -744,9 +848,7 @@ static int input_dev_uevent(struct class_device *cdev, char **envp,
if (test_bit(EV_SW, dev->evbit))
INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);
- envp[i++] = buffer + len;
- len += snprintf(buffer + len, buffer_size - len, "MODALIAS=");
- len += print_modalias(buffer + len, buffer_size - len, dev) + 1;
+ INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);
envp[i] = NULL;
return 0;
@@ -790,7 +892,7 @@ int input_register_device(struct input_dev *dev)
return -EINVAL;
}
- init_MUTEX(&dev->sem);
+ mutex_init(&dev->mutex);
set_bit(EV_SYN, dev->evbit);
/*
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 20e2972b9204f..949bdcef8c2be 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -171,9 +171,8 @@ static int joydev_open(struct inode *inode, struct file *file)
if (i >= JOYDEV_MINORS || !joydev_table[i])
return -ENODEV;
- if (!(list = kmalloc(sizeof(struct joydev_list), GFP_KERNEL)))
+ if (!(list = kzalloc(sizeof(struct joydev_list), GFP_KERNEL)))
return -ENOMEM;
- memset(list, 0, sizeof(struct joydev_list));
list->joydev = joydev_table[i];
list_add_tail(&list->node, &joydev_table[i]->list);
@@ -457,9 +456,8 @@ static struct input_handle *joydev_connect(struct input_handler *handler, struct
return NULL;
}
- if (!(joydev = kmalloc(sizeof(struct joydev), GFP_KERNEL)))
+ if (!(joydev = kzalloc(sizeof(struct joydev), GFP_KERNEL)))
return NULL;
- memset(joydev, 0, sizeof(struct joydev));
INIT_LIST_HEAD(&joydev->list);
init_waitqueue_head(&joydev->wait);
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index ec55a29fc8610..7249d324297b8 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -36,6 +36,7 @@
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/mutex.h>
#include <asm/system.h>
#include <asm/amigahw.h>
@@ -52,7 +53,7 @@ MODULE_PARM_DESC(map, "Map of attached joysticks in form of <a>,<b> (default is
__obsolete_setup("amijoy=");
static int amijoy_used;
-static DECLARE_MUTEX(amijoy_sem);
+static DEFINE_MUTEX(amijoy_mutex);
static struct input_dev *amijoy_dev[2];
static char *amijoy_phys[2] = { "amijoy/input0", "amijoy/input1" };
@@ -85,7 +86,7 @@ static int amijoy_open(struct input_dev *dev)
{
int err;
- err = down_interruptible(&amijoy_sem);
+ err = mutex_lock_interruptible(&amijoy_mutex);
if (err)
return err;
@@ -97,16 +98,16 @@ static int amijoy_open(struct input_dev *dev)
amijoy_used++;
out:
- up(&amijoy_sem);
+ mutex_unlock(&amijoy_mutex);
return err;
}
static void amijoy_close(struct input_dev *dev)
{
- down(&amijoy_sem);
+ mutex_lock(&amijoy_mutex);
if (!--amijoy_used)
free_irq(IRQ_AMIGA_VERTB, amijoy_interrupt);
- up(&amijoy_sem);
+ mutex_unlock(&amijoy_mutex);
}
static int __init amijoy_init(void)
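
(The amijoy hunks above show the pattern this patch applies across the input drivers: a semaphore used purely for mutual exclusion becomes a struct mutex. Below is a condensed sketch of the conversion with invented names rather than any particular driver's.)

#include <linux/mutex.h>

static DEFINE_MUTEX(foo_mutex);		/* was: static DECLARE_MUTEX(foo_sem); */
static int foo_users;			/* shared state the lock protects */

static int foo_open(void)
{
	int err = mutex_lock_interruptible(&foo_mutex);	/* was: down_interruptible() */
	if (err)
		return err;

	foo_users++;

	mutex_unlock(&foo_mutex);	/* was: up() */
	return 0;
}

static void foo_close(void)
{
	mutex_lock(&foo_mutex);		/* was: down() */
	foo_users--;
	mutex_unlock(&foo_mutex);
}

Per-object locks follow the same recipe, with mutex_init() standing in for init_MUTEX(), as the db9, gamecon, turbografx and iforce hunks below show.
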
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index dcffc34f30c3c..e61894685cb1b 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -38,6 +38,7 @@
#include <linux/init.h>
#include <linux/parport.h>
#include <linux/input.h>
+#include <linux/mutex.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Atari, Amstrad, Commodore, Amiga, Sega, etc. joystick driver");
@@ -111,7 +112,7 @@ struct db9 {
struct pardevice *pd;
int mode;
int used;
- struct semaphore sem;
+ struct mutex mutex;
char phys[DB9_MAX_DEVICES][32];
};
@@ -525,7 +526,7 @@ static int db9_open(struct input_dev *dev)
struct parport *port = db9->pd->port;
int err;
- err = down_interruptible(&db9->sem);
+ err = mutex_lock_interruptible(&db9->mutex);
if (err)
return err;
@@ -539,7 +540,7 @@ static int db9_open(struct input_dev *dev)
mod_timer(&db9->timer, jiffies + DB9_REFRESH_TIME);
}
- up(&db9->sem);
+ mutex_unlock(&db9->mutex);
return 0;
}
@@ -548,14 +549,14 @@ static void db9_close(struct input_dev *dev)
struct db9 *db9 = dev->private;
struct parport *port = db9->pd->port;
- down(&db9->sem);
+ mutex_lock(&db9->mutex);
if (!--db9->used) {
del_timer_sync(&db9->timer);
parport_write_control(port, 0x00);
parport_data_forward(port);
parport_release(db9->pd);
}
- up(&db9->sem);
+ mutex_unlock(&db9->mutex);
}
static struct db9 __init *db9_probe(int parport, int mode)
@@ -603,7 +604,7 @@ static struct db9 __init *db9_probe(int parport, int mode)
goto err_unreg_pardev;
}
- init_MUTEX(&db9->sem);
+ mutex_init(&db9->mutex);
db9->pd = pd;
db9->mode = mode;
init_timer(&db9->timer);
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 900587acdb47c..ecbdb6b9bbd6d 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -7,6 +7,7 @@
* Based on the work of:
* Andree Borrmann John Dahlstrom
* David Kuder Nathan Hand
+ * Raphael Assenat
*/
/*
@@ -36,6 +37,7 @@
#include <linux/init.h>
#include <linux/parport.h>
#include <linux/input.h>
+#include <linux/mutex.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("NES, SNES, N64, MultiSystem, PSX gamepad driver");
@@ -72,8 +74,9 @@ __obsolete_setup("gc_3=");
#define GC_N64 6
#define GC_PSX 7
#define GC_DDR 8
+#define GC_SNESMOUSE 9
-#define GC_MAX 8
+#define GC_MAX 9
#define GC_REFRESH_TIME HZ/100
@@ -83,7 +86,7 @@ struct gc {
struct timer_list timer;
unsigned char pads[GC_MAX + 1];
int used;
- struct semaphore sem;
+ struct mutex mutex;
char phys[GC_MAX_DEVICES][32];
};
@@ -93,7 +96,7 @@ static int gc_status_bit[] = { 0x40, 0x80, 0x20, 0x10, 0x08 };
static char *gc_names[] = { NULL, "SNES pad", "NES pad", "NES FourPort", "Multisystem joystick",
"Multisystem 2-button joystick", "N64 controller", "PSX controller",
- "PSX DDR controller" };
+ "PSX DDR controller", "SNES mouse" };
/*
* N64 support.
*/
@@ -205,9 +208,12 @@ static void gc_n64_process_packet(struct gc *gc)
* NES/SNES support.
*/
-#define GC_NES_DELAY 6 /* Delay between bits - 6us */
-#define GC_NES_LENGTH 8 /* The NES pads use 8 bits of data */
-#define GC_SNES_LENGTH 12 /* The SNES true length is 16, but the last 4 bits are unused */
+#define GC_NES_DELAY 6 /* Delay between bits - 6us */
+#define GC_NES_LENGTH 8 /* The NES pads use 8 bits of data */
+#define GC_SNES_LENGTH 12 /* The SNES true length is 16, but the
+ last 4 bits are unused */
+#define GC_SNESMOUSE_LENGTH 32 /* The SNES mouse uses 32 bits, the first
+ 16 bits are equivalent to a gamepad */
#define GC_NES_POWER 0xfc
#define GC_NES_CLOCK 0x01
@@ -242,11 +248,15 @@ static void gc_nes_read_packet(struct gc *gc, int length, unsigned char *data)
static void gc_nes_process_packet(struct gc *gc)
{
- unsigned char data[GC_SNES_LENGTH];
+ unsigned char data[GC_SNESMOUSE_LENGTH];
struct input_dev *dev;
- int i, j, s;
+ int i, j, s, len;
+ char x_rel, y_rel;
+
+ len = gc->pads[GC_SNESMOUSE] ? GC_SNESMOUSE_LENGTH :
+ (gc->pads[GC_SNES] ? GC_SNES_LENGTH : GC_NES_LENGTH);
- gc_nes_read_packet(gc, gc->pads[GC_SNES] ? GC_SNES_LENGTH : GC_NES_LENGTH, data);
+ gc_nes_read_packet(gc, len, data);
for (i = 0; i < GC_MAX_DEVICES; i++) {
@@ -269,6 +279,44 @@ static void gc_nes_process_packet(struct gc *gc)
for (j = 0; j < 8; j++)
input_report_key(dev, gc_snes_btn[j], s & data[gc_snes_bytes[j]]);
+ if (s & gc->pads[GC_SNESMOUSE]) {
+ /*
+ * The 4 unused bits from SNES controllers appear to be ID bits
+ * so use them to make sure iwe are dealing with a mouse.
+ * gamepad is connected. This is important since
+ * my SNES gamepad sends 1's for bits 16-31, which
+ * cause the mouse pointer to quickly move to the
+ * upper left corner of the screen.
+ */
+ if (!(s & data[12]) && !(s & data[13]) &&
+ !(s & data[14]) && (s & data[15])) {
+ input_report_key(dev, BTN_LEFT, s & data[9]);
+ input_report_key(dev, BTN_RIGHT, s & data[8]);
+
+ x_rel = y_rel = 0;
+ for (j = 0; j < 7; j++) {
+ x_rel <<= 1;
+ if (data[25 + j] & s)
+ x_rel |= 1;
+
+ y_rel <<= 1;
+ if (data[17 + j] & s)
+ y_rel |= 1;
+ }
+
+ if (x_rel) {
+ if (data[24] & s)
+ x_rel = -x_rel;
+ input_report_rel(dev, REL_X, x_rel);
+ }
+
+ if (y_rel) {
+ if (data[16] & s)
+ y_rel = -y_rel;
+ input_report_rel(dev, REL_Y, y_rel);
+ }
+ }
+ }
input_sync(dev);
}
}
@@ -524,10 +572,10 @@ static void gc_timer(unsigned long private)
gc_n64_process_packet(gc);
/*
- * NES and SNES pads
+ * NES and SNES pads or mouse
*/
- if (gc->pads[GC_NES] || gc->pads[GC_SNES])
+ if (gc->pads[GC_NES] || gc->pads[GC_SNES] || gc->pads[GC_SNESMOUSE])
gc_nes_process_packet(gc);
/*
@@ -552,7 +600,7 @@ static int gc_open(struct input_dev *dev)
struct gc *gc = dev->private;
int err;
- err = down_interruptible(&gc->sem);
+ err = mutex_lock_interruptible(&gc->mutex);
if (err)
return err;
@@ -562,7 +610,7 @@ static int gc_open(struct input_dev *dev)
mod_timer(&gc->timer, jiffies + GC_REFRESH_TIME);
}
- up(&gc->sem);
+ mutex_unlock(&gc->mutex);
return 0;
}
@@ -570,13 +618,13 @@ static void gc_close(struct input_dev *dev)
{
struct gc *gc = dev->private;
- down(&gc->sem);
+ mutex_lock(&gc->mutex);
if (!--gc->used) {
del_timer_sync(&gc->timer);
parport_write_control(gc->pd->port, 0x00);
parport_release(gc->pd);
}
- up(&gc->sem);
+ mutex_unlock(&gc->mutex);
}
static int __init gc_setup_pad(struct gc *gc, int idx, int pad_type)
@@ -609,10 +657,13 @@ static int __init gc_setup_pad(struct gc *gc, int idx, int pad_type)
input_dev->open = gc_open;
input_dev->close = gc_close;
- input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+ if (pad_type != GC_SNESMOUSE) {
+ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
- for (i = 0; i < 2; i++)
- input_set_abs_params(input_dev, ABS_X + i, -1, 1, 0, 0);
+ for (i = 0; i < 2; i++)
+ input_set_abs_params(input_dev, ABS_X + i, -1, 1, 0, 0);
+ } else
+ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REL);
gc->pads[0] |= gc_status_bit[idx];
gc->pads[pad_type] |= gc_status_bit[idx];
@@ -630,6 +681,13 @@ static int __init gc_setup_pad(struct gc *gc, int idx, int pad_type)
break;
+ case GC_SNESMOUSE:
+ set_bit(BTN_LEFT, input_dev->keybit);
+ set_bit(BTN_RIGHT, input_dev->keybit);
+ set_bit(REL_X, input_dev->relbit);
+ set_bit(REL_Y, input_dev->relbit);
+ break;
+
case GC_SNES:
for (i = 4; i < 8; i++)
set_bit(gc_snes_btn[i], input_dev->keybit);
@@ -693,7 +751,7 @@ static struct gc __init *gc_probe(int parport, int *pads, int n_pads)
goto err_unreg_pardev;
}
- init_MUTEX(&gc->sem);
+ mutex_init(&gc->mutex);
gc->pd = pd;
init_timer(&gc->timer);
gc->timer.data = (long) gc;
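
(For the SNES-mouse support added above: each relative axis arrives as a sign bit followed by seven magnitude bits, most significant first, which the driver reassembles bit by bit from data[16..23] and data[24..31]. The standalone sketch below decodes the same layout from a plain array of bit samples; it simplifies away the driver's data[]/status-bit masking, so treat the names and the test vector as illustrative only.)

#include <stdio.h>

/*
 * Decode one SNES-mouse axis: bits[0] is the sign flag, bits[1..7] are the
 * magnitude, most significant bit first.  Each entry is one sampled bit
 * (non-zero == set), mirroring how the driver tests data[n] & s.
 */
static int snes_mouse_axis(const unsigned char *bits)
{
	int value = 0;
	int j;

	for (j = 0; j < 7; j++) {
		value <<= 1;
		if (bits[1 + j])
			value |= 1;
	}

	return bits[0] ? -value : value;
}

int main(void)
{
	/* sign = 1, magnitude 0000101 -> a delta of -5 */
	unsigned char sample[8] = { 1, 0, 0, 0, 0, 1, 0, 1 };

	printf("decoded delta: %d\n", snes_mouse_axis(sample));
	return 0;
}

The driver additionally checks the four ID bits (data[12..14] clear, data[15] set) before trusting a report, since, as the comment notes, a plain SNES pad returns all ones for bits 16-31.
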
diff --git a/drivers/input/joystick/iforce/iforce-ff.c b/drivers/input/joystick/iforce/iforce-ff.c
index 4678b6dab43bf..2b8e8456c9fae 100644
--- a/drivers/input/joystick/iforce/iforce-ff.c
+++ b/drivers/input/joystick/iforce/iforce-ff.c
@@ -42,14 +42,14 @@ static int make_magnitude_modifier(struct iforce* iforce,
unsigned char data[3];
if (!no_alloc) {
- down(&iforce->mem_mutex);
+ mutex_lock(&iforce->mem_mutex);
if (allocate_resource(&(iforce->device_memory), mod_chunk, 2,
iforce->device_memory.start, iforce->device_memory.end, 2L,
NULL, NULL)) {
- up(&iforce->mem_mutex);
+ mutex_unlock(&iforce->mem_mutex);
return -ENOMEM;
}
- up(&iforce->mem_mutex);
+ mutex_unlock(&iforce->mem_mutex);
}
data[0] = LO(mod_chunk->start);
@@ -75,14 +75,14 @@ static int make_period_modifier(struct iforce* iforce,
period = TIME_SCALE(period);
if (!no_alloc) {
- down(&iforce->mem_mutex);
+ mutex_lock(&iforce->mem_mutex);
if (allocate_resource(&(iforce->device_memory), mod_chunk, 0x0c,
iforce->device_memory.start, iforce->device_memory.end, 2L,
NULL, NULL)) {
- up(&iforce->mem_mutex);
+ mutex_unlock(&iforce->mem_mutex);
return -ENOMEM;
}
- up(&iforce->mem_mutex);
+ mutex_unlock(&iforce->mem_mutex);
}
data[0] = LO(mod_chunk->start);
@@ -115,14 +115,14 @@ static int make_envelope_modifier(struct iforce* iforce,
fade_duration = TIME_SCALE(fade_duration);
if (!no_alloc) {
- down(&iforce->mem_mutex);
+ mutex_lock(&iforce->mem_mutex);
if (allocate_resource(&(iforce->device_memory), mod_chunk, 0x0e,
iforce->device_memory.start, iforce->device_memory.end, 2L,
NULL, NULL)) {
- up(&iforce->mem_mutex);
+ mutex_unlock(&iforce->mem_mutex);
return -ENOMEM;
}
- up(&iforce->mem_mutex);
+ mutex_unlock(&iforce->mem_mutex);
}
data[0] = LO(mod_chunk->start);
@@ -152,14 +152,14 @@ static int make_condition_modifier(struct iforce* iforce,
unsigned char data[10];
if (!no_alloc) {
- down(&iforce->mem_mutex);
+ mutex_lock(&iforce->mem_mutex);
if (allocate_resource(&(iforce->device_memory), mod_chunk, 8,
iforce->device_memory.start, iforce->device_memory.end, 2L,
NULL, NULL)) {
- up(&iforce->mem_mutex);
+ mutex_unlock(&iforce->mem_mutex);
return -ENOMEM;
}
- up(&iforce->mem_mutex);
+ mutex_unlock(&iforce->mem_mutex);
}
data[0] = LO(mod_chunk->start);
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b6bc049980471..ab0a26b924cac 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -350,7 +350,7 @@ int iforce_init_device(struct iforce *iforce)
init_waitqueue_head(&iforce->wait);
spin_lock_init(&iforce->xmit_lock);
- init_MUTEX(&iforce->mem_mutex);
+ mutex_init(&iforce->mem_mutex);
iforce->xmit.buf = iforce->xmit_data;
iforce->dev = input_dev;
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index 146f406b8f8a4..668f24535ba07 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -37,7 +37,7 @@
#include <linux/serio.h>
#include <linux/config.h>
#include <linux/circ_buf.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
/* This module provides arbitrary resource management routines.
* I use it to manage the device's memory.
@@ -45,6 +45,7 @@
*/
#include <linux/ioport.h>
+
#define IFORCE_MAX_LENGTH 16
/* iforce::bus */
@@ -146,7 +147,7 @@ struct iforce {
wait_queue_head_t wait;
struct resource device_memory;
struct iforce_core_effect core_effects[FF_EFFECTS_MAX];
- struct semaphore mem_mutex;
+ struct mutex mem_mutex;
};
/* Get hi and low bytes of a 16-bits int */
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index b154938e88a4f..5570fd5487c73 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -37,6 +37,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
+#include <linux/mutex.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("TurboGraFX parallel port interface driver");
@@ -86,7 +87,7 @@ static struct tgfx {
char phys[TGFX_MAX_DEVICES][32];
int sticks;
int used;
- struct semaphore sem;
+ struct mutex sem;
} *tgfx_base[TGFX_MAX_PORTS];
/*
@@ -128,7 +129,7 @@ static int tgfx_open(struct input_dev *dev)
struct tgfx *tgfx = dev->private;
int err;
- err = down_interruptible(&tgfx->sem);
+ err = mutex_lock_interruptible(&tgfx->sem);
if (err)
return err;
@@ -138,7 +139,7 @@ static int tgfx_open(struct input_dev *dev)
mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME);
}
- up(&tgfx->sem);
+ mutex_unlock(&tgfx->sem);
return 0;
}
@@ -146,13 +147,13 @@ static void tgfx_close(struct input_dev *dev)
{
struct tgfx *tgfx = dev->private;
- down(&tgfx->sem);
+ mutex_lock(&tgfx->sem);
if (!--tgfx->used) {
del_timer_sync(&tgfx->timer);
parport_write_control(tgfx->pd->port, 0x00);
parport_release(tgfx->pd);
}
- up(&tgfx->sem);
+ mutex_unlock(&tgfx->sem);
}
@@ -191,7 +192,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
goto err_unreg_pardev;
}
- init_MUTEX(&tgfx->sem);
+ mutex_init(&tgfx->sem);
tgfx->pd = pd;
init_timer(&tgfx->timer);
tgfx->timer.data = (long) tgfx;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 3b0ac3b43c541..a9dda56f62c41 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -13,7 +13,7 @@ menuconfig INPUT_KEYBOARD
if INPUT_KEYBOARD
config KEYBOARD_ATKBD
- tristate "AT keyboard" if !X86_PC
+ tristate "AT keyboard" if EMBEDDED || !X86_PC
default y
select SERIO
select SERIO_LIBPS2
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index ffacf6eca5f53..fad04b66d268f 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -27,6 +27,7 @@
#include <linux/serio.h>
#include <linux/workqueue.h>
#include <linux/libps2.h>
+#include <linux/mutex.h>
#define DRIVER_DESC "AT and PS/2 keyboard driver"
@@ -216,7 +217,7 @@ struct atkbd {
unsigned long time;
struct work_struct event_work;
- struct semaphore event_sem;
+ struct mutex event_mutex;
unsigned long event_mask;
};
@@ -302,19 +303,19 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
if (atkbd->translated) {
if (atkbd->emul ||
- !(code == ATKBD_RET_EMUL0 || code == ATKBD_RET_EMUL1 ||
- code == ATKBD_RET_HANGUEL || code == ATKBD_RET_HANJA ||
- (code == ATKBD_RET_ERR && !atkbd->err_xl) ||
- (code == ATKBD_RET_BAT && !atkbd->bat_xl))) {
+ (code != ATKBD_RET_EMUL0 && code != ATKBD_RET_EMUL1 &&
+ code != ATKBD_RET_HANGUEL && code != ATKBD_RET_HANJA &&
+ (code != ATKBD_RET_ERR || atkbd->err_xl) &&
+ (code != ATKBD_RET_BAT || atkbd->bat_xl))) {
atkbd->release = code >> 7;
code &= 0x7f;
}
if (!atkbd->emul) {
if ((code & 0x7f) == (ATKBD_RET_BAT & 0x7f))
- atkbd->bat_xl = !atkbd->release;
+ atkbd->bat_xl = !(data >> 7);
if ((code & 0x7f) == (ATKBD_RET_ERR & 0x7f))
- atkbd->err_xl = !atkbd->release;
+ atkbd->err_xl = !(data >> 7);
}
}
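
(The rewritten atkbd condition above is a De Morgan transformation, !(A || B || ...) turning into (!A && !B && ...), so the only behavioural change in that hunk is that bat_xl/err_xl are now derived from the high bit of the raw data byte instead of the release flag. A throwaway check of the boolean identity, nothing driver-specific:)

#include <assert.h>

int main(void)
{
	int a, b, c;

	/* !(a || b || c) is equivalent to (!a && !b && !c) for all inputs */
	for (a = 0; a <= 1; a++)
		for (b = 0; b <= 1; b++)
			for (c = 0; c <= 1; c++)
				assert(!(a || b || c) == (!a && !b && !c));
	return 0;
}
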
@@ -449,7 +450,7 @@ static void atkbd_event_work(void *data)
unsigned char param[2];
int i, j;
- down(&atkbd->event_sem);
+ mutex_lock(&atkbd->event_mutex);
if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask)) {
param[0] = (test_bit(LED_SCROLLL, dev->led) ? 1 : 0)
@@ -480,7 +481,7 @@ static void atkbd_event_work(void *data)
ps2_command(&atkbd->ps2dev, param, ATKBD_CMD_SETREP);
}
- up(&atkbd->event_sem);
+ mutex_unlock(&atkbd->event_mutex);
}
/*
@@ -846,7 +847,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
atkbd->dev = dev;
ps2_init(&atkbd->ps2dev, serio);
INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd);
- init_MUTEX(&atkbd->event_sem);
+ mutex_init(&atkbd->event_mutex);
switch (serio->id.type) {
@@ -862,9 +863,6 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
atkbd->softrepeat = atkbd_softrepeat;
atkbd->scroll = atkbd_scroll;
- if (!atkbd->write)
- atkbd->softrepeat = 1;
-
if (atkbd->softrepeat)
atkbd->softraw = 1;
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
index e301ee4ca264e..96c6bf77248ae 100644
--- a/drivers/input/keyboard/corgikbd.c
+++ b/drivers/input/keyboard/corgikbd.c
@@ -29,11 +29,11 @@
#define KB_COLS 12
#define KB_ROWMASK(r) (1 << (r))
#define SCANCODE(r,c) ( ((r)<<4) + (c) + 1 )
-/* zero code, 124 scancodes + 3 hinge combinations */
-#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 +3 )
-#define SCAN_INTERVAL (HZ/10)
+/* zero code, 124 scancodes */
+#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 )
-#define HINGE_SCAN_INTERVAL (HZ/4)
+#define SCAN_INTERVAL (50) /* ms */
+#define HINGE_SCAN_INTERVAL (250) /* ms */
#define CORGI_KEY_CALENDER KEY_F1
#define CORGI_KEY_ADDRESS KEY_F2
@@ -49,9 +49,6 @@
#define CORGI_KEY_MAIL KEY_F10
#define CORGI_KEY_OK KEY_F11
#define CORGI_KEY_MENU KEY_F12
-#define CORGI_HINGE_0 KEY_KP0
-#define CORGI_HINGE_1 KEY_KP1
-#define CORGI_HINGE_2 KEY_KP2
static unsigned char corgikbd_keycode[NR_SCANCODES] = {
0, /* 0 */
@@ -63,7 +60,6 @@ static unsigned char corgikbd_keycode[NR_SCANCODES] = {
CORGI_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, 0, CORGI_KEY_FN, 0, 0, 0, 0, /* 81-96 */
KEY_SYSRQ, CORGI_KEY_JAP1, CORGI_KEY_JAP2, CORGI_KEY_CANCEL, CORGI_KEY_OK, CORGI_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0, /* 97-112 */
CORGI_KEY_OFF, CORGI_KEY_EXOK, CORGI_KEY_EXCANCEL, CORGI_KEY_EXJOGDOWN, CORGI_KEY_EXJOGUP, 0, 0, 0, 0, 0, 0, 0, /* 113-124 */
- CORGI_HINGE_0, CORGI_HINGE_1, CORGI_HINGE_2 /* 125-127 */
};
@@ -187,7 +183,7 @@ static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data, struct pt_regs
/* if any keys are pressed, enable the timer */
if (num_pressed)
- mod_timer(&corgikbd_data->timer, jiffies + SCAN_INTERVAL);
+ mod_timer(&corgikbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
spin_unlock_irqrestore(&corgikbd_data->lock, flags);
}
@@ -228,6 +224,7 @@ static void corgikbd_timer_callback(unsigned long data)
* 0x0c - Keyboard and Screen Closed
*/
+#define READ_GPIO_BIT(x) (GPLR(x) & GPIO_bit(x))
#define HINGE_STABLE_COUNT 2
static int sharpsl_hinge_state;
static int hinge_count;
@@ -239,6 +236,7 @@ static void corgikbd_hinge_timer(unsigned long data)
unsigned long flags;
gprr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_GPRR) & (CORGI_SCP_SWA | CORGI_SCP_SWB);
+ gprr |= (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0);
if (gprr != sharpsl_hinge_state) {
hinge_count = 0;
sharpsl_hinge_state = gprr;
@@ -249,27 +247,38 @@ static void corgikbd_hinge_timer(unsigned long data)
input_report_switch(corgikbd_data->input, SW_0, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0));
input_report_switch(corgikbd_data->input, SW_1, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0));
+ input_report_switch(corgikbd_data->input, SW_2, (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0));
input_sync(corgikbd_data->input);
spin_unlock_irqrestore(&corgikbd_data->lock, flags);
}
}
- mod_timer(&corgikbd_data->htimer, jiffies + HINGE_SCAN_INTERVAL);
+ mod_timer(&corgikbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
}
#ifdef CONFIG_PM
static int corgikbd_suspend(struct platform_device *dev, pm_message_t state)
{
+ int i;
struct corgikbd *corgikbd = platform_get_drvdata(dev);
+
corgikbd->suspended = 1;
+ /* Strobe 0 is the power key, so it can't be made an input for
+ power saving; hence the loop starts at i = 1 */
+ for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
+ pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_IN);
return 0;
}
static int corgikbd_resume(struct platform_device *dev)
{
+ int i;
struct corgikbd *corgikbd = platform_get_drvdata(dev);
+ for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
+ pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
+
/* Upon resume, ignore the suspend key for a short while */
corgikbd->suspend_jiffies=jiffies;
corgikbd->suspended = 0;
@@ -333,10 +342,11 @@ static int __init corgikbd_probe(struct platform_device *pdev)
clear_bit(0, input_dev->keybit);
set_bit(SW_0, input_dev->swbit);
set_bit(SW_1, input_dev->swbit);
+ set_bit(SW_2, input_dev->swbit);
input_register_device(corgikbd->input);
- mod_timer(&corgikbd->htimer, jiffies + HINGE_SCAN_INTERVAL);
+ mod_timer(&corgikbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
/* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
for (i = 0; i < CORGI_KEY_SENSE_NUM; i++) {
@@ -351,6 +361,9 @@ static int __init corgikbd_probe(struct platform_device *pdev)
for (i = 0; i < CORGI_KEY_STROBE_NUM; i++)
pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
+ /* Setup the headphone jack as an input */
+ pxa_gpio_mode(CORGI_GPIO_AK_INT | GPIO_IN);
+
return 0;
}
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index 63f387e4b783d..1dca3cf42a54c 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -250,16 +250,19 @@ static int hil_kbd_connect(struct serio *serio, struct serio_driver *drv)
struct hil_kbd *kbd;
uint8_t did, *idd;
int i;
-
+
kbd = kzalloc(sizeof(*kbd), GFP_KERNEL);
if (!kbd)
return -ENOMEM;
kbd->dev = input_allocate_device();
- if (!kbd->dev) goto bail1;
+ if (!kbd->dev)
+ goto bail0;
+
kbd->dev->private = kbd;
- if (serio_open(serio, drv)) goto bail0;
+ if (serio_open(serio, drv))
+ goto bail1;
serio_set_drvdata(serio, kbd);
kbd->serio = serio;
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
index 83999d5831225..bc61cf8cfc653 100644
--- a/drivers/input/keyboard/spitzkbd.c
+++ b/drivers/input/keyboard/spitzkbd.c
@@ -30,6 +30,7 @@
#define SCANCODE(r,c) (((r)<<4) + (c) + 1)
#define NR_SCANCODES ((KB_ROWS<<4) + 1)
+#define SCAN_INTERVAL (50) /* ms */
#define HINGE_SCAN_INTERVAL (150) /* ms */
#define SPITZ_KEY_CALENDER KEY_F1
@@ -230,7 +231,7 @@ static void spitzkbd_scankeyboard(struct spitzkbd *spitzkbd_data, struct pt_regs
/* if any keys are pressed, enable the timer */
if (num_pressed)
- mod_timer(&spitzkbd_data->timer, jiffies + msecs_to_jiffies(100));
+ mod_timer(&spitzkbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
}
@@ -287,6 +288,7 @@ static void spitzkbd_hinge_timer(unsigned long data)
unsigned long flags;
state = GPLR(SPITZ_GPIO_SWA) & (GPIO_bit(SPITZ_GPIO_SWA)|GPIO_bit(SPITZ_GPIO_SWB));
+ state |= (GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT));
if (state != sharpsl_hinge_state) {
hinge_count = 0;
sharpsl_hinge_state = state;
@@ -299,6 +301,7 @@ static void spitzkbd_hinge_timer(unsigned long data)
input_report_switch(spitzkbd_data->input, SW_0, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0));
input_report_switch(spitzkbd_data->input, SW_1, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0));
+ input_report_switch(spitzkbd_data->input, SW_2, ((GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT)) != 0));
input_sync(spitzkbd_data->input);
spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
@@ -397,6 +400,7 @@ static int __init spitzkbd_probe(struct platform_device *dev)
clear_bit(0, input_dev->keybit);
set_bit(SW_0, input_dev->swbit);
set_bit(SW_1, input_dev->swbit);
+ set_bit(SW_2, input_dev->swbit);
input_register_device(input_dev);
@@ -432,6 +436,9 @@ static int __init spitzkbd_probe(struct platform_device *dev)
request_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd_hinge_isr,
SA_INTERRUPT | SA_TRIGGER_RISING | SA_TRIGGER_FALLING,
"Spitzkbd SWB", spitzkbd);
+ request_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd_hinge_isr,
+ SA_INTERRUPT | SA_TRIGGER_RISING | SA_TRIGGER_FALLING,
+ "Spitzkbd HP", spitzkbd);
printk(KERN_INFO "input: Spitz Keyboard Registered\n");
@@ -450,6 +457,7 @@ static int spitzkbd_remove(struct platform_device *dev)
free_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd);
free_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd);
free_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd);
+ free_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd);
del_timer_sync(&spitzkbd->htimer);
del_timer_sync(&spitzkbd->timer);
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 1ef477f4469c1..afd322185bbff 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -24,7 +24,6 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("PC Speaker beeper driver");
MODULE_LICENSE("GPL");
-static struct platform_device *pcspkr_platform_device;
static DEFINE_SPINLOCK(i8253_beep_lock);
static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
@@ -135,35 +134,11 @@ static struct platform_driver pcspkr_platform_driver = {
static int __init pcspkr_init(void)
{
- int err;
-
- err = platform_driver_register(&pcspkr_platform_driver);
- if (err)
- return err;
-
- pcspkr_platform_device = platform_device_alloc("pcspkr", -1);
- if (!pcspkr_platform_device) {
- err = -ENOMEM;
- goto err_unregister_driver;
- }
-
- err = platform_device_add(pcspkr_platform_device);
- if (err)
- goto err_free_device;
-
- return 0;
-
- err_free_device:
- platform_device_put(pcspkr_platform_device);
- err_unregister_driver:
- platform_driver_unregister(&pcspkr_platform_driver);
-
- return err;
+ return platform_driver_register(&pcspkr_platform_driver);
}
static void __exit pcspkr_exit(void)
{
- platform_device_unregister(pcspkr_platform_device);
platform_driver_unregister(&pcspkr_platform_driver);
}
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 546ed9b4901df..d723e9ad7c41a 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -194,7 +194,7 @@ static int uinput_open(struct inode *inode, struct file *file)
if (!newdev)
return -ENOMEM;
- init_MUTEX(&newdev->sem);
+ mutex_init(&newdev->mutex);
spin_lock_init(&newdev->requests_lock);
init_waitqueue_head(&newdev->requests_waitq);
init_waitqueue_head(&newdev->waitq);
@@ -340,7 +340,7 @@ static ssize_t uinput_write(struct file *file, const char __user *buffer, size_t
struct uinput_device *udev = file->private_data;
int retval;
- retval = down_interruptible(&udev->sem);
+ retval = mutex_lock_interruptible(&udev->mutex);
if (retval)
return retval;
@@ -348,7 +348,7 @@ static ssize_t uinput_write(struct file *file, const char __user *buffer, size_t
uinput_inject_event(udev, buffer, count) :
uinput_setup_device(udev, buffer, count);
- up(&udev->sem);
+ mutex_unlock(&udev->mutex);
return retval;
}
@@ -369,7 +369,7 @@ static ssize_t uinput_read(struct file *file, char __user *buffer, size_t count,
if (retval)
return retval;
- retval = down_interruptible(&udev->sem);
+ retval = mutex_lock_interruptible(&udev->mutex);
if (retval)
return retval;
@@ -388,7 +388,7 @@ static ssize_t uinput_read(struct file *file, char __user *buffer, size_t count,
}
out:
- up(&udev->sem);
+ mutex_unlock(&udev->mutex);
return retval;
}
@@ -439,7 +439,7 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
udev = file->private_data;
- retval = down_interruptible(&udev->sem);
+ retval = mutex_lock_interruptible(&udev->mutex);
if (retval)
return retval;
@@ -589,7 +589,7 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
out:
- up(&udev->sem);
+ mutex_unlock(&udev->mutex);
return retval;
}
diff --git a/drivers/input/mouse/hil_ptr.c b/drivers/input/mouse/hil_ptr.c
index bfb564fd8fe2e..69f02178c528c 100644
--- a/drivers/input/mouse/hil_ptr.c
+++ b/drivers/input/mouse/hil_ptr.c
@@ -249,10 +249,13 @@ static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver)
return -ENOMEM;
ptr->dev = input_allocate_device();
- if (!ptr->dev) goto bail0;
+ if (!ptr->dev)
+ goto bail0;
+
ptr->dev->private = ptr;
- if (serio_open(serio, driver)) goto bail1;
+ if (serio_open(serio, driver))
+ goto bail1;
serio_set_drvdata(serio, ptr);
ptr->serio = serio;
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index ad62174676761..32d70ed8f41dd 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -20,6 +20,8 @@
#include <linux/serio.h>
#include <linux/init.h>
#include <linux/libps2.h>
+#include <linux/mutex.h>
+
#include "psmouse.h"
#include "synaptics.h"
#include "logips2pp.h"
@@ -98,13 +100,13 @@ __obsolete_setup("psmouse_resetafter=");
__obsolete_setup("psmouse_rate=");
/*
- * psmouse_sem protects all operations changing state of mouse
+ * psmouse_mutex protects all operations changing state of mouse
* (connecting, disconnecting, changing rate or resolution via
* sysfs). We could use a per-device semaphore but since there is
* rarely more than one PS/2 mouse connected and since the semaphore
* is taken in "slow" paths it is not worth it.
*/
-static DECLARE_MUTEX(psmouse_sem);
+static DEFINE_MUTEX(psmouse_mutex);
static struct workqueue_struct *kpsmoused_wq;
@@ -868,7 +870,7 @@ static void psmouse_resync(void *p)
int failed = 0, enabled = 0;
int i;
- down(&psmouse_sem);
+ mutex_lock(&psmouse_mutex);
if (psmouse->state != PSMOUSE_RESYNCING)
goto out;
@@ -948,7 +950,7 @@ static void psmouse_resync(void *p)
if (parent)
psmouse_activate(parent);
out:
- up(&psmouse_sem);
+ mutex_unlock(&psmouse_mutex);
}
/*
@@ -974,14 +976,14 @@ static void psmouse_disconnect(struct serio *serio)
sysfs_remove_group(&serio->dev.kobj, &psmouse_attribute_group);
- down(&psmouse_sem);
+ mutex_lock(&psmouse_mutex);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
/* make sure we don't have a resync in progress */
- up(&psmouse_sem);
+ mutex_unlock(&psmouse_mutex);
flush_workqueue(kpsmoused_wq);
- down(&psmouse_sem);
+ mutex_lock(&psmouse_mutex);
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
@@ -1004,7 +1006,7 @@ static void psmouse_disconnect(struct serio *serio)
if (parent)
psmouse_activate(parent);
- up(&psmouse_sem);
+ mutex_unlock(&psmouse_mutex);
}
static int psmouse_switch_protocol(struct psmouse *psmouse, struct psmouse_protocol *proto)
@@ -1076,7 +1078,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
struct input_dev *input_dev;
int retval = -ENOMEM;
- down(&psmouse_sem);
+ mutex_lock(&psmouse_mutex);
/*
* If this is a pass-through port deactivate parent so the device
@@ -1144,7 +1146,7 @@ out:
if (parent)
psmouse_activate(parent);
- up(&psmouse_sem);
+ mutex_unlock(&psmouse_mutex);
return retval;
}
@@ -1161,7 +1163,7 @@ static int psmouse_reconnect(struct serio *serio)
return -1;
}
- down(&psmouse_sem);
+ mutex_lock(&psmouse_mutex);
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
@@ -1195,7 +1197,7 @@ out:
if (parent)
psmouse_activate(parent);
- up(&psmouse_sem);
+ mutex_unlock(&psmouse_mutex);
return rc;
}
@@ -1273,7 +1275,7 @@ ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *dev
goto out_unpin;
}
- retval = down_interruptible(&psmouse_sem);
+ retval = mutex_lock_interruptible(&psmouse_mutex);
if (retval)
goto out_unpin;
@@ -1281,7 +1283,7 @@ ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *dev
if (psmouse->state == PSMOUSE_IGNORE) {
retval = -ENODEV;
- goto out_up;
+ goto out_unlock;
}
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
@@ -1299,8 +1301,8 @@ ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *dev
if (parent)
psmouse_activate(parent);
- out_up:
- up(&psmouse_sem);
+ out_unlock:
+ mutex_unlock(&psmouse_mutex);
out_unpin:
serio_unpin_driver(serio);
return retval;
@@ -1357,11 +1359,11 @@ static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, co
return -EIO;
}
- up(&psmouse_sem);
+ mutex_unlock(&psmouse_mutex);
serio_unpin_driver(serio);
serio_unregister_child_port(serio);
serio_pin_driver_uninterruptible(serio);
- down(&psmouse_sem);
+ mutex_lock(&psmouse_mutex);
if (serio->drv != &psmouse_drv) {
input_free_device(new_dev);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 2051bec2c394b..ad5d0a85e9601 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -247,14 +247,12 @@ static void synaptics_pt_create(struct psmouse *psmouse)
{
struct serio *serio;
- serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!serio) {
printk(KERN_ERR "synaptics: not enough memory to allocate pass-through port\n");
return;
}
- memset(serio, 0, sizeof(struct serio));
-
serio->id.type = SERIO_PS_PSTHRU;
strlcpy(serio->name, "Synaptics pass-through", sizeof(serio->name));
strlcpy(serio->phys, "synaptics-pt/serio0", sizeof(serio->name));
@@ -605,14 +603,21 @@ static struct dmi_system_id toshiba_dmi_table[] = {
.ident = "Toshiba Satellite",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME , "Satellite"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
},
},
{
.ident = "Toshiba Dynabook",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME , "dynabook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "dynabook"),
+ },
+ },
+ {
+ .ident = "Toshiba Portege M300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"),
},
},
{ }
@@ -623,10 +628,9 @@ int synaptics_init(struct psmouse *psmouse)
{
struct synaptics_data *priv;
- psmouse->private = priv = kmalloc(sizeof(struct synaptics_data), GFP_KERNEL);
+ psmouse->private = priv = kzalloc(sizeof(struct synaptics_data), GFP_KERNEL);
if (!priv)
return -1;
- memset(priv, 0, sizeof(struct synaptics_data));
if (synaptics_query_hardware(psmouse)) {
printk(KERN_ERR "Unable to query Synaptics hardware.\n");
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 9abed18d2ecf7..b685a507955d2 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -412,9 +412,8 @@ static int mousedev_open(struct inode * inode, struct file * file)
if (i >= MOUSEDEV_MINORS || !mousedev_table[i])
return -ENODEV;
- if (!(list = kmalloc(sizeof(struct mousedev_list), GFP_KERNEL)))
+ if (!(list = kzalloc(sizeof(struct mousedev_list), GFP_KERNEL)))
return -ENOMEM;
- memset(list, 0, sizeof(struct mousedev_list));
spin_lock_init(&list->packet_lock);
list->pos_x = xres / 2;
@@ -626,9 +625,8 @@ static struct input_handle *mousedev_connect(struct input_handler *handler, stru
return NULL;
}
- if (!(mousedev = kmalloc(sizeof(struct mousedev), GFP_KERNEL)))
+ if (!(mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL)))
return NULL;
- memset(mousedev, 0, sizeof(struct mousedev));
INIT_LIST_HEAD(&mousedev->list);
init_waitqueue_head(&mousedev->wait);
diff --git a/drivers/input/power.c b/drivers/input/power.c
index bfc5c63ebffe2..526e6070600c8 100644
--- a/drivers/input/power.c
+++ b/drivers/input/power.c
@@ -103,9 +103,8 @@ static struct input_handle *power_connect(struct input_handler *handler,
{
struct input_handle *handle;
- if (!(handle = kmalloc(sizeof(struct input_handle), GFP_KERNEL)))
+ if (!(handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL)))
return NULL;
- memset(handle, 0, sizeof(struct input_handle));
handle->dev = dev;
handle->handler = handler;
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index ea499783fb12f..bbbe15e219044 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -872,9 +872,8 @@ int hil_mlc_register(hil_mlc *mlc) {
for (i = 0; i < HIL_MLC_DEVMEM; i++) {
struct serio *mlc_serio;
hil_mlc_copy_di_scratch(mlc, i);
- mlc_serio = kmalloc(sizeof(*mlc_serio), GFP_KERNEL);
+ mlc_serio = kzalloc(sizeof(*mlc_serio), GFP_KERNEL);
mlc->serio[i] = mlc_serio;
- memset(mlc_serio, 0, sizeof(*mlc_serio));
mlc_serio->id = hil_mlc_serio_id;
mlc_serio->write = hil_mlc_serio_write;
mlc_serio->open = hil_mlc_serio_open;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a4c6f35227239..f606e96bc2f4b 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -192,7 +192,9 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
#include <linux/pnp.h>
static int i8042_pnp_kbd_registered;
+static unsigned int i8042_pnp_kbd_devices;
static int i8042_pnp_aux_registered;
+static unsigned int i8042_pnp_aux_devices;
static int i8042_pnp_command_reg;
static int i8042_pnp_data_reg;
@@ -219,6 +221,7 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
strncat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name));
}
+ i8042_pnp_kbd_devices++;
return 0;
}
@@ -239,6 +242,7 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
strncat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name));
}
+ i8042_pnp_aux_devices++;
return 0;
}
@@ -287,21 +291,23 @@ static void i8042_pnp_exit(void)
static int __init i8042_pnp_init(void)
{
- int result_kbd = 0, result_aux = 0;
char kbd_irq_str[4] = { 0 }, aux_irq_str[4] = { 0 };
+ int err;
if (i8042_nopnp) {
printk(KERN_INFO "i8042: PNP detection disabled\n");
return 0;
}
- if ((result_kbd = pnp_register_driver(&i8042_pnp_kbd_driver)) >= 0)
+ err = pnp_register_driver(&i8042_pnp_kbd_driver);
+ if (!err)
i8042_pnp_kbd_registered = 1;
- if ((result_aux = pnp_register_driver(&i8042_pnp_aux_driver)) >= 0)
+ err = pnp_register_driver(&i8042_pnp_aux_driver);
+ if (!err)
i8042_pnp_aux_registered = 1;
- if (result_kbd <= 0 && result_aux <= 0) {
+ if (!i8042_pnp_kbd_devices && !i8042_pnp_aux_devices) {
i8042_pnp_exit();
#if defined(__ia64__)
return -ENODEV;
@@ -311,24 +317,24 @@ static int __init i8042_pnp_init(void)
#endif
}
- if (result_kbd > 0)
+ if (i8042_pnp_kbd_devices)
snprintf(kbd_irq_str, sizeof(kbd_irq_str),
"%d", i8042_pnp_kbd_irq);
- if (result_aux > 0)
+ if (i8042_pnp_aux_devices)
snprintf(aux_irq_str, sizeof(aux_irq_str),
"%d", i8042_pnp_aux_irq);
printk(KERN_INFO "PNP: PS/2 Controller [%s%s%s] at %#x,%#x irq %s%s%s\n",
- i8042_pnp_kbd_name, (result_kbd > 0 && result_aux > 0) ? "," : "",
+ i8042_pnp_kbd_name, (i8042_pnp_kbd_devices && i8042_pnp_aux_devices) ? "," : "",
i8042_pnp_aux_name,
i8042_pnp_data_reg, i8042_pnp_command_reg,
- kbd_irq_str, (result_kbd > 0 && result_aux > 0) ? "," : "",
+ kbd_irq_str, (i8042_pnp_kbd_devices && i8042_pnp_aux_devices) ? "," : "",
aux_irq_str);
#if defined(__ia64__)
- if (result_kbd <= 0)
+ if (!i8042_pnp_kbd_devices)
i8042_nokbd = 1;
- if (result_aux <= 0)
+ if (!i8042_pnp_aux_devices)
i8042_noaux = 1;
#endif
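
(The i8042 hunks above stop treating pnp_register_driver()'s return value as a device count and instead count matches in the probe callbacks, bailing out only when neither the keyboard nor the aux driver probed anything. A reduced sketch of that counter-in-probe pattern, with hypothetical driver and ID names:)

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pnp.h>

static unsigned int example_pnp_devices;	/* bumped once per matched device */

static int example_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *did)
{
	example_pnp_devices++;
	return 0;
}

static struct pnp_device_id example_pnp_ids[] = {
	{ .id = "PNP0303" },		/* hypothetical ID table entry */
	{ .id = "" },
};

static struct pnp_driver example_pnp_driver = {
	.name		= "example",
	.id_table	= example_pnp_ids,
	.probe		= example_pnp_probe,
};

static int __init example_init(void)
{
	int err = pnp_register_driver(&example_pnp_driver);

	if (err)
		return err;

	if (!example_pnp_devices) {
		/* registration succeeded but no device matched */
		pnp_unregister_driver(&example_pnp_driver);
		return -ENODEV;
	}

	return 0;
}

In the real driver those counters also decide, on ia64, whether i8042_nokbd and i8042_noaux get set.
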
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index d4c990f7c85e9..79c97f94bcbd5 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -84,7 +84,7 @@ void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout)
maxbytes = sizeof(ps2dev->cmdbuf);
}
- down(&ps2dev->cmd_sem);
+ mutex_lock(&ps2dev->cmd_mutex);
serio_pause_rx(ps2dev->serio);
ps2dev->flags = PS2_FLAG_CMD;
@@ -94,7 +94,7 @@ void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout)
wait_event_timeout(ps2dev->wait,
!(ps2dev->flags & PS2_FLAG_CMD),
msecs_to_jiffies(timeout));
- up(&ps2dev->cmd_sem);
+ mutex_unlock(&ps2dev->cmd_mutex);
}
/*
@@ -177,7 +177,7 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
return -1;
}
- down(&ps2dev->cmd_sem);
+ mutex_lock(&ps2dev->cmd_mutex);
serio_pause_rx(ps2dev->serio);
ps2dev->flags = command == PS2_CMD_GETID ? PS2_FLAG_WAITID : 0;
@@ -229,7 +229,7 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
ps2dev->flags = 0;
serio_continue_rx(ps2dev->serio);
- up(&ps2dev->cmd_sem);
+ mutex_unlock(&ps2dev->cmd_mutex);
return rc;
}
@@ -281,7 +281,7 @@ int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int comman
void ps2_init(struct ps2dev *ps2dev, struct serio *serio)
{
- init_MUTEX(&ps2dev->cmd_sem);
+ mutex_init(&ps2dev->cmd_mutex);
init_waitqueue_head(&ps2dev->wait);
ps2dev->serio = serio;
}
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 1d15c2819818d..a5c1fb3a4a519 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -171,9 +171,8 @@ static struct serio * __init parkbd_allocate_serio(void)
{
struct serio *serio;
- serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (serio) {
- memset(serio, 0, sizeof(struct serio));
serio->id.type = parkbd_mode;
serio->write = parkbd_write,
strlcpy(serio->name, "PARKBD AT/XT keyboard adapter", sizeof(serio->name));
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index a3bd11589bc3a..513d37fc1acfc 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -111,11 +111,10 @@ static int __devinit rpckbd_probe(struct platform_device *dev)
{
struct serio *serio;
- serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!serio)
return -ENOMEM;
- memset(serio, 0, sizeof(struct serio));
serio->id.type = SERIO_8042;
serio->write = rpckbd_write;
serio->open = rpckbd_open;
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 2f76813c3a645..6521034bc9332 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kthread.h>
+#include <linux/mutex.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Serio abstraction core");
@@ -52,10 +53,10 @@ EXPORT_SYMBOL(serio_rescan);
EXPORT_SYMBOL(serio_reconnect);
/*
- * serio_sem protects entire serio subsystem and is taken every time
+ * serio_mutex protects the entire serio subsystem and is taken every time
* a serio port or driver is registered or unregistered.
*/
-static DECLARE_MUTEX(serio_sem);
+static DEFINE_MUTEX(serio_mutex);
static LIST_HEAD(serio_list);
@@ -70,9 +71,9 @@ static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
{
int retval;
- down(&serio->drv_sem);
+ mutex_lock(&serio->drv_mutex);
retval = drv->connect(serio, drv);
- up(&serio->drv_sem);
+ mutex_unlock(&serio->drv_mutex);
return retval;
}
@@ -81,20 +82,20 @@ static int serio_reconnect_driver(struct serio *serio)
{
int retval = -1;
- down(&serio->drv_sem);
+ mutex_lock(&serio->drv_mutex);
if (serio->drv && serio->drv->reconnect)
retval = serio->drv->reconnect(serio);
- up(&serio->drv_sem);
+ mutex_unlock(&serio->drv_mutex);
return retval;
}
static void serio_disconnect_driver(struct serio *serio)
{
- down(&serio->drv_sem);
+ mutex_lock(&serio->drv_mutex);
if (serio->drv)
serio->drv->disconnect(serio);
- up(&serio->drv_sem);
+ mutex_unlock(&serio->drv_mutex);
}
static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
@@ -195,6 +196,7 @@ static void serio_queue_event(void *object, struct module *owner,
if ((event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC))) {
if (!try_module_get(owner)) {
printk(KERN_WARNING "serio: Can't get module reference, dropping event %d\n", event_type);
+ kfree(event);
goto out;
}
@@ -272,7 +274,7 @@ static void serio_handle_event(void)
struct serio_event *event;
struct serio_driver *serio_drv;
- down(&serio_sem);
+ mutex_lock(&serio_mutex);
/*
* Note that we handle only one event here to give swsusp
@@ -314,7 +316,7 @@ static void serio_handle_event(void)
serio_free_event(event);
}
- up(&serio_sem);
+ mutex_unlock(&serio_mutex);
}
/*
@@ -449,7 +451,7 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
struct device_driver *drv;
int retval;
- retval = down_interruptible(&serio_sem);
+ retval = mutex_lock_interruptible(&serio_mutex);
if (retval)
return retval;
@@ -469,7 +471,7 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
retval = -EINVAL;
}
- up(&serio_sem);
+ mutex_unlock(&serio_mutex);
return retval;
}
@@ -524,7 +526,7 @@ static void serio_init_port(struct serio *serio)
__module_get(THIS_MODULE);
spin_lock_init(&serio->lock);
- init_MUTEX(&serio->drv_sem);
+ mutex_init(&serio->drv_mutex);
device_initialize(&serio->dev);
snprintf(serio->dev.bus_id, sizeof(serio->dev.bus_id),
"serio%ld", (long)atomic_inc_return(&serio_no) - 1);
@@ -661,10 +663,10 @@ void __serio_register_port(struct serio *serio, struct module *owner)
*/
void serio_unregister_port(struct serio *serio)
{
- down(&serio_sem);
+ mutex_lock(&serio_mutex);
serio_disconnect_port(serio);
serio_destroy_port(serio);
- up(&serio_sem);
+ mutex_unlock(&serio_mutex);
}
/*
@@ -672,17 +674,17 @@ void serio_unregister_port(struct serio *serio)
*/
void serio_unregister_child_port(struct serio *serio)
{
- down(&serio_sem);
+ mutex_lock(&serio_mutex);
if (serio->child) {
serio_disconnect_port(serio->child);
serio_destroy_port(serio->child);
}
- up(&serio_sem);
+ mutex_unlock(&serio_mutex);
}
/*
* Submits register request to kseriod for subsequent execution.
- * Can be used when it is not obvious whether the serio_sem is
+ * Can be used when it is not obvious whether the serio_mutex is
* taken or not and when delayed execution is feasible.
*/
void __serio_unregister_port_delayed(struct serio *serio, struct module *owner)
@@ -765,7 +767,7 @@ void serio_unregister_driver(struct serio_driver *drv)
{
struct serio *serio;
- down(&serio_sem);
+ mutex_lock(&serio_mutex);
drv->manual_bind = 1; /* so serio_find_driver ignores it */
start_over:
@@ -779,7 +781,7 @@ start_over:
}
driver_unregister(&drv->driver);
- up(&serio_sem);
+ mutex_unlock(&serio_mutex);
}
static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
@@ -858,7 +860,7 @@ static int serio_resume(struct device *dev)
return 0;
}
-/* called from serio_driver->connect/disconnect methods under serio_sem */
+/* called from serio_driver->connect/disconnect methods under serio_mutex */
int serio_open(struct serio *serio, struct serio_driver *drv)
{
serio_set_drv(serio, drv);
@@ -870,7 +872,7 @@ int serio_open(struct serio *serio, struct serio_driver *drv)
return 0;
}
-/* called from serio_driver->connect/disconnect methods under serio_sem */
+/* called from serio_driver->connect/disconnect methods under serio_mutex */
void serio_close(struct serio *serio)
{
if (serio->close)
@@ -923,5 +925,5 @@ static void __exit serio_exit(void)
kthread_stop(serio_task);
}
-module_init(serio_init);
+subsys_initcall(serio_init);
module_exit(serio_exit);
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 47e08de18d07a..5a2703b536dc0 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -19,6 +19,7 @@
#include <linux/devfs_fs_kernel.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
+#include <linux/mutex.h>
#define DRIVER_DESC "Raw serio driver"
@@ -46,7 +47,7 @@ struct serio_raw_list {
struct list_head node;
};
-static DECLARE_MUTEX(serio_raw_sem);
+static DEFINE_MUTEX(serio_raw_mutex);
static LIST_HEAD(serio_raw_list);
static unsigned int serio_raw_no;
@@ -81,7 +82,7 @@ static int serio_raw_open(struct inode *inode, struct file *file)
struct serio_raw_list *list;
int retval = 0;
- retval = down_interruptible(&serio_raw_sem);
+ retval = mutex_lock_interruptible(&serio_raw_mutex);
if (retval)
return retval;
@@ -95,12 +96,11 @@ static int serio_raw_open(struct inode *inode, struct file *file)
goto out;
}
- if (!(list = kmalloc(sizeof(struct serio_raw_list), GFP_KERNEL))) {
+ if (!(list = kzalloc(sizeof(struct serio_raw_list), GFP_KERNEL))) {
retval = -ENOMEM;
goto out;
}
- memset(list, 0, sizeof(struct serio_raw_list));
list->serio_raw = serio_raw;
file->private_data = list;
@@ -108,7 +108,7 @@ static int serio_raw_open(struct inode *inode, struct file *file)
list_add_tail(&list->node, &serio_raw->list);
out:
- up(&serio_raw_sem);
+ mutex_unlock(&serio_raw_mutex);
return retval;
}
@@ -130,12 +130,12 @@ static int serio_raw_release(struct inode *inode, struct file *file)
struct serio_raw_list *list = file->private_data;
struct serio_raw *serio_raw = list->serio_raw;
- down(&serio_raw_sem);
+ mutex_lock(&serio_raw_mutex);
serio_raw_fasync(-1, file, 0);
serio_raw_cleanup(serio_raw);
- up(&serio_raw_sem);
+ mutex_unlock(&serio_raw_mutex);
return 0;
}
@@ -194,7 +194,7 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer, siz
int retval;
unsigned char c;
- retval = down_interruptible(&serio_raw_sem);
+ retval = mutex_lock_interruptible(&serio_raw_mutex);
if (retval)
return retval;
@@ -219,7 +219,7 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer, siz
};
out:
- up(&serio_raw_sem);
+ mutex_unlock(&serio_raw_mutex);
return written;
}
@@ -275,14 +275,13 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
struct serio_raw *serio_raw;
int err;
- if (!(serio_raw = kmalloc(sizeof(struct serio_raw), GFP_KERNEL))) {
+ if (!(serio_raw = kzalloc(sizeof(struct serio_raw), GFP_KERNEL))) {
printk(KERN_ERR "serio_raw.c: can't allocate memory for a device\n");
return -ENOMEM;
}
- down(&serio_raw_sem);
+ mutex_lock(&serio_raw_mutex);
- memset(serio_raw, 0, sizeof(struct serio_raw));
snprintf(serio_raw->name, sizeof(serio_raw->name), "serio_raw%d", serio_raw_no++);
serio_raw->refcnt = 1;
serio_raw->serio = serio;
@@ -325,7 +324,7 @@ out_free:
serio_set_drvdata(serio, NULL);
kfree(serio_raw);
out:
- up(&serio_raw_sem);
+ mutex_unlock(&serio_raw_mutex);
return err;
}
@@ -350,7 +349,7 @@ static void serio_raw_disconnect(struct serio *serio)
{
struct serio_raw *serio_raw;
- down(&serio_raw_sem);
+ mutex_lock(&serio_raw_mutex);
serio_raw = serio_get_drvdata(serio);
@@ -361,7 +360,7 @@ static void serio_raw_disconnect(struct serio *serio)
if (!serio_raw_cleanup(serio_raw))
wake_up_interruptible(&serio_raw->wait);
- up(&serio_raw_sem);
+ mutex_unlock(&serio_raw_mutex);
}
static struct serio_device_id serio_raw_serio_ids[] = {
diff --git a/drivers/input/tsdev.c b/drivers/input/tsdev.c
index ca1547929d62e..d678d144bbf86 100644
--- a/drivers/input/tsdev.c
+++ b/drivers/input/tsdev.c
@@ -157,9 +157,8 @@ static int tsdev_open(struct inode *inode, struct file *file)
if (i >= TSDEV_MINORS || !tsdev_table[i & TSDEV_MINOR_MASK])
return -ENODEV;
- if (!(list = kmalloc(sizeof(struct tsdev_list), GFP_KERNEL)))
+ if (!(list = kzalloc(sizeof(struct tsdev_list), GFP_KERNEL)))
return -ENOMEM;
- memset(list, 0, sizeof(struct tsdev_list));
list->raw = (i >= TSDEV_MINORS/2) ? 1 : 0;
@@ -379,9 +378,8 @@ static struct input_handle *tsdev_connect(struct input_handler *handler,
return NULL;
}
- if (!(tsdev = kmalloc(sizeof(struct tsdev), GFP_KERNEL)))
+ if (!(tsdev = kzalloc(sizeof(struct tsdev), GFP_KERNEL)))
return NULL;
- memset(tsdev, 0, sizeof(struct tsdev));
INIT_LIST_HEAD(&tsdev->list);
init_waitqueue_head(&tsdev->wait);
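
serio_raw and tsdev also fold the kmalloc()+memset() pair into kzalloc(), which allocates and zeroes in one step and cannot lose the memset on a later edit. A short sketch with a made-up structure:

#include <linux/slab.h>

struct example_list {
	void *owner;
	int fd;
};

static struct example_list *example_alloc(void)
{
	struct example_list *list;

	/*
	 * Before:  list = kmalloc(sizeof(struct example_list), GFP_KERNEL);
	 *          memset(list, 0, sizeof(struct example_list));
	 * After:   a single call that allocates and zeroes the memory.
	 */
	list = kzalloc(sizeof(struct example_list), GFP_KERNEL);
	if (!list)
		return NULL;

	return list;
}
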
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index 2a2b03ff096b6..7bbfd85ab7937 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -51,8 +51,8 @@ MODULE_LICENSE("GPL");
handler.
*/
-static void avmcs_config(dev_link_t *link);
-static void avmcs_release(dev_link_t *link);
+static int avmcs_config(struct pcmcia_device *link);
+static void avmcs_release(struct pcmcia_device *link);
/*
The attach() and detach() entry points are used to create and destroy
@@ -65,10 +65,10 @@ static void avmcs_detach(struct pcmcia_device *p_dev);
/*
A linked list of "instances" of the skeleton device. Each actual
PCMCIA card corresponds to one device instance, and is described
- by one dev_link_t structure (defined in ds.h).
+ by one struct pcmcia_device structure (defined in ds.h).
You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of dev_link_t pointers, where minor
+ memory card driver uses an array of struct pcmcia_device pointers, where minor
device numbers are used to derive the corresponding array index.
*/
@@ -78,7 +78,7 @@ static void avmcs_detach(struct pcmcia_device *p_dev);
example, ethernet cards, modems). In other cases, there may be
many actual or logical devices (SCSI adapters, memory cards with
multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a dev_link_t
+ in a linked list starting at the 'dev' field of a struct pcmcia_device
structure. We allocate them in the card's private data structure,
because they generally can't be allocated dynamically.
*/
@@ -99,54 +99,38 @@ typedef struct local_info_t {
======================================================================*/
-static int avmcs_attach(struct pcmcia_device *p_dev)
+static int avmcs_probe(struct pcmcia_device *p_dev)
{
- dev_link_t *link;
local_info_t *local;
- /* Initialize the dev_link_t structure */
- link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
- if (!link)
- goto err;
- memset(link, 0, sizeof(struct dev_link_t));
-
/* The io structure describes IO port mapping */
- link->io.NumPorts1 = 16;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts2 = 0;
+ p_dev->io.NumPorts1 = 16;
+ p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ p_dev->io.NumPorts2 = 0;
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+
+ p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
- link->irq.IRQInfo1 = IRQ_LEVEL_ID;
-
/* General socket configuration */
- link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
- link->conf.IntType = INT_MEMORY_AND_IO;
- link->conf.ConfigIndex = 1;
- link->conf.Present = PRESENT_OPTION;
+ p_dev->conf.Attributes = CONF_ENABLE_IRQ;
+ p_dev->conf.IntType = INT_MEMORY_AND_IO;
+ p_dev->conf.ConfigIndex = 1;
+ p_dev->conf.Present = PRESENT_OPTION;
/* Allocate space for private device-specific data */
local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
if (!local)
- goto err_kfree;
+ goto err;
memset(local, 0, sizeof(local_info_t));
- link->priv = local;
+ p_dev->priv = local;
- link->handle = p_dev;
- p_dev->instance = link;
+ return avmcs_config(p_dev);
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- avmcs_config(link);
-
- return 0;
-
- err_kfree:
- kfree(link);
err:
- return -EINVAL;
+ return -ENOMEM;
} /* avmcs_attach */
/*======================================================================
@@ -158,15 +142,10 @@ static int avmcs_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void avmcs_detach(struct pcmcia_device *p_dev)
+static void avmcs_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
- if (link->state & DEV_CONFIG)
avmcs_release(link);
-
- kfree(link->priv);
- kfree(link);
+ kfree(link->priv);
} /* avmcs_detach */
/*======================================================================
@@ -177,7 +156,7 @@ static void avmcs_detach(struct pcmcia_device *p_dev)
======================================================================*/
-static int get_tuple(client_handle_t handle, tuple_t *tuple,
+static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i = pcmcia_get_tuple_data(handle, tuple);
@@ -185,7 +164,7 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple,
return pcmcia_parse_tuple(handle, tuple, parse);
}
-static int first_tuple(client_handle_t handle, tuple_t *tuple,
+static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i = pcmcia_get_first_tuple(handle, tuple);
@@ -193,7 +172,7 @@ static int first_tuple(client_handle_t handle, tuple_t *tuple,
return get_tuple(handle, tuple, parse);
}
-static int next_tuple(client_handle_t handle, tuple_t *tuple,
+static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i = pcmcia_get_next_tuple(handle, tuple);
@@ -201,9 +180,8 @@ static int next_tuple(client_handle_t handle, tuple_t *tuple,
return get_tuple(handle, tuple, parse);
}
-static void avmcs_config(dev_link_t *link)
+static int avmcs_config(struct pcmcia_device *link)
{
- client_handle_t handle;
tuple_t tuple;
cisparse_t parse;
cistpl_cftable_entry_t *cf = &parse.cftable_entry;
@@ -213,8 +191,7 @@ static void avmcs_config(dev_link_t *link)
char devname[128];
int cardtype;
int (*addcard)(unsigned int port, unsigned irq);
-
- handle = link->handle;
+
dev = link->priv;
/*
@@ -223,25 +200,21 @@ static void avmcs_config(dev_link_t *link)
*/
do {
tuple.DesiredTuple = CISTPL_CONFIG;
- i = pcmcia_get_first_tuple(handle, &tuple);
+ i = pcmcia_get_first_tuple(link, &tuple);
if (i != CS_SUCCESS) break;
tuple.TupleData = buf;
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
- i = pcmcia_get_tuple_data(handle, &tuple);
+ i = pcmcia_get_tuple_data(link, &tuple);
if (i != CS_SUCCESS) break;
- i = pcmcia_parse_tuple(handle, &tuple, &parse);
+ i = pcmcia_parse_tuple(link, &tuple, &parse);
if (i != CS_SUCCESS) break;
link->conf.ConfigBase = parse.config.base;
} while (0);
if (i != CS_SUCCESS) {
- cs_error(link->handle, ParseTuple, i);
- link->state &= ~DEV_CONFIG_PENDING;
- return;
+ cs_error(link, ParseTuple, i);
+ return -ENODEV;
}
-
- /* Configure card */
- link->state |= DEV_CONFIG;
do {
@@ -252,7 +225,7 @@ static void avmcs_config(dev_link_t *link)
tuple.DesiredTuple = CISTPL_VERS_1;
devname[0] = 0;
- if( !first_tuple(handle, &tuple, &parse) && parse.version_1.ns > 1 ) {
+ if( !first_tuple(link, &tuple, &parse) && parse.version_1.ns > 1 ) {
strlcpy(devname,parse.version_1.str + parse.version_1.ofs[1],
sizeof(devname));
}
@@ -263,7 +236,7 @@ static void avmcs_config(dev_link_t *link)
tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- i = first_tuple(handle, &tuple, &parse);
+ i = first_tuple(link, &tuple, &parse);
while (i == CS_SUCCESS) {
if (cf->io.nwin > 0) {
link->conf.ConfigIndex = cf->index;
@@ -273,36 +246,36 @@ static void avmcs_config(dev_link_t *link)
printk(KERN_INFO "avm_cs: testing i/o %#x-%#x\n",
link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) goto found_port;
}
- i = next_tuple(handle, &tuple, &parse);
+ i = next_tuple(link, &tuple, &parse);
}
found_port:
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
break;
}
-
+
/*
* allocate an interrupt line
*/
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIRQ, i);
- pcmcia_release_io(link->handle, &link->io);
+ cs_error(link, RequestIRQ, i);
+ /* undo */
+ pcmcia_disable_device(link);
break;
}
-
+
/*
* configure the PCMCIA socket
*/
- i = pcmcia_request_configuration(link->handle, &link->conf);
+ i = pcmcia_request_configuration(link, &link->conf);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestConfiguration, i);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
+ cs_error(link, RequestConfiguration, i);
+ pcmcia_disable_device(link);
break;
}
@@ -331,13 +304,12 @@ found_port:
dev->node.major = 64;
dev->node.minor = 0;
- link->dev = &dev->node;
-
- link->state &= ~DEV_CONFIG_PENDING;
+ link->dev_node = &dev->node;
+
/* If any step failed, release any partially configured state */
if (i != 0) {
avmcs_release(link);
- return;
+ return -ENODEV;
}
@@ -351,9 +323,10 @@ found_port:
printk(KERN_ERR "avm_cs: failed to add AVM-%s-Controller at i/o %#x, irq %d\n",
dev->node.dev_name, link->io.BasePort1, link->irq.AssignedIRQ);
avmcs_release(link);
- return;
+ return -ENODEV;
}
dev->node.minor = i;
+ return 0;
} /* avmcs_config */
@@ -365,56 +338,12 @@ found_port:
======================================================================*/
-static void avmcs_release(dev_link_t *link)
+static void avmcs_release(struct pcmcia_device *link)
{
- b1pcmcia_delcard(link->io.BasePort1, link->irq.AssignedIRQ);
-
- /* Unlink the device chain */
- link->dev = NULL;
-
- /* Don't bother checking to see if these succeed or not */
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
+ b1pcmcia_delcard(link->io.BasePort1, link->irq.AssignedIRQ);
+ pcmcia_disable_device(link);
} /* avmcs_release */
-static int avmcs_suspend(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
-}
-
-static int avmcs_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_request_configuration(link->handle, &link->conf);
-
- return 0;
-}
-
-/*======================================================================
-
- The card status event handler. Mostly, this schedules other
- stuff to run after an event is received. A CARD_REMOVAL event
- also sets some flags to discourage the net drivers from trying
- to talk to the card any more.
-
- When a CARD_REMOVAL event is received, we immediately set a flag
- to block future accesses to this device. All the functions that
- actually access the device should check this flag to make sure
- the card is still present.
-
-======================================================================*/
-
static struct pcmcia_device_id avmcs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN-Controller B1", 0x95d42008, 0x845dc335),
@@ -429,11 +358,9 @@ static struct pcmcia_driver avmcs_driver = {
.drv = {
.name = "avm_cs",
},
- .probe = avmcs_attach,
+ .probe = avmcs_probe,
.remove = avmcs_detach,
.id_table = avmcs_ids,
- .suspend= avmcs_suspend,
- .resume = avmcs_resume,
};
static int __init avmcs_init(void)
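
The avm_cs conversion, like the hisax drivers below, follows one recipe: probe() fills in the struct pcmcia_device itself instead of allocating a dev_link_t, config() returns an errno instead of manipulating link->state, and every failure path collapses into a single pcmcia_disable_device() call. The sketch below restates that flow with made-up example_cs_* names; it uses only the calls visible in this diff and is illustrative, not a complete driver (the CIS tuple walk is omitted).

#include <linux/errno.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

static void example_cs_release(struct pcmcia_device *link)
{
	/* One call now undoes request_io/irq/configuration. */
	pcmcia_disable_device(link);
}

static int example_cs_config(struct pcmcia_device *link)
{
	int i;

	i = pcmcia_request_io(link, &link->io);
	if (i != CS_SUCCESS)
		goto failed;

	i = pcmcia_request_irq(link, &link->irq);
	if (i != CS_SUCCESS)
		goto failed;

	i = pcmcia_request_configuration(link, &link->conf);
	if (i != CS_SUCCESS)
		goto failed;

	return 0;

failed:
	example_cs_release(link);
	return -ENODEV;			/* propagated to the PCMCIA core */
}

static int example_cs_probe(struct pcmcia_device *link)
{
	/* No separately allocated dev_link_t: set up the device directly. */
	link->io.NumPorts1    = 16;
	link->io.Attributes1  = IO_DATA_PATH_WIDTH_8;
	link->irq.Attributes  = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FIRST_SHARED;
	link->irq.IRQInfo1    = IRQ_LEVEL_ID;
	link->conf.Attributes = CONF_ENABLE_IRQ;
	link->conf.IntType    = INT_MEMORY_AND_IO;

	return example_cs_config(link);	/* was: avmcs_config(link); return 0; */
}
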
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index 969da40c42486..ac28e3278ad91 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -67,8 +67,8 @@ module_param(isdnprot, int, 0);
handler.
*/
-static void avma1cs_config(dev_link_t *link);
-static void avma1cs_release(dev_link_t *link);
+static int avma1cs_config(struct pcmcia_device *link);
+static void avma1cs_release(struct pcmcia_device *link);
/*
The attach() and detach() entry points are used to create and destroy
@@ -82,10 +82,10 @@ static void avma1cs_detach(struct pcmcia_device *p_dev);
/*
A linked list of "instances" of the skeleton device. Each actual
PCMCIA card corresponds to one device instance, and is described
- by one dev_link_t structure (defined in ds.h).
+ by one struct pcmcia_device structure (defined in ds.h).
You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of dev_link_t pointers, where minor
+ memory card driver uses an array of struct pcmcia_device pointers, where minor
device numbers are used to derive the corresponding array index.
*/
@@ -95,7 +95,7 @@ static void avma1cs_detach(struct pcmcia_device *p_dev);
example, ethernet cards, modems). In other cases, there may be
many actual or logical devices (SCSI adapters, memory cards with
multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a dev_link_t
+ in a linked list starting at the 'dev' field of a struct pcmcia_device
structure. We allocate them in the card's private data structure,
because they generally can't be allocated dynamically.
*/
@@ -116,55 +116,40 @@ typedef struct local_info_t {
======================================================================*/
-static int avma1cs_attach(struct pcmcia_device *p_dev)
+static int avma1cs_probe(struct pcmcia_device *p_dev)
{
- dev_link_t *link;
local_info_t *local;
DEBUG(0, "avma1cs_attach()\n");
- /* Initialize the dev_link_t structure */
- link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
- if (!link)
- return -ENOMEM;
- memset(link, 0, sizeof(struct dev_link_t));
-
/* Allocate space for private device-specific data */
local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local) {
- kfree(link);
+ if (!local)
return -ENOMEM;
- }
+
memset(local, 0, sizeof(local_info_t));
- link->priv = local;
+ p_dev->priv = local;
/* The io structure describes IO port mapping */
- link->io.NumPorts1 = 16;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts2 = 16;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
- link->io.IOAddrLines = 5;
+ p_dev->io.NumPorts1 = 16;
+ p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ p_dev->io.NumPorts2 = 16;
+ p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
+ p_dev->io.IOAddrLines = 5;
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
- link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
/* General socket configuration */
- link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
- link->conf.IntType = INT_MEMORY_AND_IO;
- link->conf.ConfigIndex = 1;
- link->conf.Present = PRESENT_OPTION;
+ p_dev->conf.Attributes = CONF_ENABLE_IRQ;
+ p_dev->conf.IntType = INT_MEMORY_AND_IO;
+ p_dev->conf.ConfigIndex = 1;
+ p_dev->conf.Present = PRESENT_OPTION;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- avma1cs_config(link);
-
- return 0;
+ return avma1cs_config(p_dev);
} /* avma1cs_attach */
/*======================================================================
@@ -176,17 +161,11 @@ static int avma1cs_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void avma1cs_detach(struct pcmcia_device *p_dev)
+static void avma1cs_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
- DEBUG(0, "avma1cs_detach(0x%p)\n", link);
-
- if (link->state & DEV_CONFIG)
- avma1cs_release(link);
-
- kfree(link->priv);
- kfree(link);
+ DEBUG(0, "avma1cs_detach(0x%p)\n", link);
+ avma1cs_release(link);
+ kfree(link->priv);
} /* avma1cs_detach */
/*======================================================================
@@ -197,7 +176,7 @@ static void avma1cs_detach(struct pcmcia_device *p_dev)
======================================================================*/
-static int get_tuple(client_handle_t handle, tuple_t *tuple,
+static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i = pcmcia_get_tuple_data(handle, tuple);
@@ -205,7 +184,7 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple,
return pcmcia_parse_tuple(handle, tuple, parse);
}
-static int first_tuple(client_handle_t handle, tuple_t *tuple,
+static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i = pcmcia_get_first_tuple(handle, tuple);
@@ -213,7 +192,7 @@ static int first_tuple(client_handle_t handle, tuple_t *tuple,
return get_tuple(handle, tuple, parse);
}
-static int next_tuple(client_handle_t handle, tuple_t *tuple,
+static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i = pcmcia_get_next_tuple(handle, tuple);
@@ -221,9 +200,8 @@ static int next_tuple(client_handle_t handle, tuple_t *tuple,
return get_tuple(handle, tuple, parse);
}
-static void avma1cs_config(dev_link_t *link)
+static int avma1cs_config(struct pcmcia_device *link)
{
- client_handle_t handle;
tuple_t tuple;
cisparse_t parse;
cistpl_cftable_entry_t *cf = &parse.cftable_entry;
@@ -233,8 +211,7 @@ static void avma1cs_config(dev_link_t *link)
char devname[128];
IsdnCard_t icard;
int busy = 0;
-
- handle = link->handle;
+
dev = link->priv;
DEBUG(0, "avma1cs_config(0x%p)\n", link);
@@ -245,25 +222,21 @@ static void avma1cs_config(dev_link_t *link)
*/
do {
tuple.DesiredTuple = CISTPL_CONFIG;
- i = pcmcia_get_first_tuple(handle, &tuple);
+ i = pcmcia_get_first_tuple(link, &tuple);
if (i != CS_SUCCESS) break;
tuple.TupleData = buf;
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
- i = pcmcia_get_tuple_data(handle, &tuple);
+ i = pcmcia_get_tuple_data(link, &tuple);
if (i != CS_SUCCESS) break;
- i = pcmcia_parse_tuple(handle, &tuple, &parse);
+ i = pcmcia_parse_tuple(link, &tuple, &parse);
if (i != CS_SUCCESS) break;
link->conf.ConfigBase = parse.config.base;
} while (0);
if (i != CS_SUCCESS) {
- cs_error(link->handle, ParseTuple, i);
- link->state &= ~DEV_CONFIG_PENDING;
- return;
+ cs_error(link, ParseTuple, i);
+ return -ENODEV;
}
-
- /* Configure card */
- link->state |= DEV_CONFIG;
do {
@@ -274,7 +247,7 @@ static void avma1cs_config(dev_link_t *link)
tuple.DesiredTuple = CISTPL_VERS_1;
devname[0] = 0;
- if( !first_tuple(handle, &tuple, &parse) && parse.version_1.ns > 1 ) {
+ if( !first_tuple(link, &tuple, &parse) && parse.version_1.ns > 1 ) {
strlcpy(devname,parse.version_1.str + parse.version_1.ofs[1],
sizeof(devname));
}
@@ -285,7 +258,7 @@ static void avma1cs_config(dev_link_t *link)
tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- i = first_tuple(handle, &tuple, &parse);
+ i = first_tuple(link, &tuple, &parse);
while (i == CS_SUCCESS) {
if (cf->io.nwin > 0) {
link->conf.ConfigIndex = cf->index;
@@ -295,36 +268,36 @@ static void avma1cs_config(dev_link_t *link)
printk(KERN_INFO "avma1_cs: testing i/o %#x-%#x\n",
link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1 - 1);
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) goto found_port;
}
- i = next_tuple(handle, &tuple, &parse);
+ i = next_tuple(link, &tuple, &parse);
}
found_port:
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
break;
}
/*
* allocate an interrupt line
*/
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIRQ, i);
- pcmcia_release_io(link->handle, &link->io);
+ cs_error(link, RequestIRQ, i);
+ /* undo */
+ pcmcia_disable_device(link);
break;
}
-
+
/*
* configure the PCMCIA socket
*/
- i = pcmcia_request_configuration(link->handle, &link->conf);
+ i = pcmcia_request_configuration(link, &link->conf);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestConfiguration, i);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
+ cs_error(link, RequestConfiguration, i);
+ pcmcia_disable_device(link);
break;
}
@@ -336,13 +309,12 @@ found_port:
strcpy(dev->node.dev_name, "A1");
dev->node.major = 45;
dev->node.minor = 0;
- link->dev = &dev->node;
-
- link->state &= ~DEV_CONFIG_PENDING;
+ link->dev_node = &dev->node;
+
/* If any step failed, release any partially configured state */
if (i != 0) {
avma1cs_release(link);
- return;
+ return -ENODEV;
}
printk(KERN_NOTICE "avma1_cs: checking at i/o %#x, irq %d\n",
@@ -357,10 +329,11 @@ found_port:
if (i < 0) {
printk(KERN_ERR "avma1_cs: failed to initialize AVM A1 PCMCIA %d at i/o %#x\n", i, link->io.BasePort1);
avma1cs_release(link);
- return;
+ return -ENODEV;
}
dev->node.minor = i;
+ return 0;
} /* avma1cs_config */
/*======================================================================
@@ -371,47 +344,18 @@ found_port:
======================================================================*/
-static void avma1cs_release(dev_link_t *link)
+static void avma1cs_release(struct pcmcia_device *link)
{
- local_info_t *local = link->priv;
+ local_info_t *local = link->priv;
- DEBUG(0, "avma1cs_release(0x%p)\n", link);
+ DEBUG(0, "avma1cs_release(0x%p)\n", link);
- /* no unregister function with hisax */
- HiSax_closecard(local->node.minor);
+ /* now unregister function with hisax */
+ HiSax_closecard(local->node.minor);
- /* Unlink the device chain */
- link->dev = NULL;
-
- /* Don't bother checking to see if these succeed or not */
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
} /* avma1cs_release */
-static int avma1cs_suspend(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
-}
-
-static int avma1cs_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_request_configuration(link->handle, &link->conf);
-
- return 0;
-}
-
static struct pcmcia_device_id avma1cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN A", 0x95d42008, 0xadc9d4bb),
@@ -425,13 +369,11 @@ static struct pcmcia_driver avma1cs_driver = {
.drv = {
.name = "avma1_cs",
},
- .probe = avma1cs_attach,
+ .probe = avma1cs_probe,
.remove = avma1cs_detach,
.id_table = avma1cs_ids,
- .suspend = avma1cs_suspend,
- .resume = avma1cs_resume,
};
-
+
/*====================================================================*/
static int __init init_avma1_cs(void)
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index 062fb8f0739f8..e18e75be8ed30 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -94,8 +94,8 @@ module_param(protocol, int, 0);
handler.
*/
-static void elsa_cs_config(dev_link_t *link);
-static void elsa_cs_release(dev_link_t *link);
+static int elsa_cs_config(struct pcmcia_device *link);
+static void elsa_cs_release(struct pcmcia_device *link);
/*
The attach() and detach() entry points are used to create and destroy
@@ -111,7 +111,7 @@ static void elsa_cs_detach(struct pcmcia_device *p_dev);
example, ethernet cards, modems). In other cases, there may be
many actual or logical devices (SCSI adapters, memory cards with
multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a dev_link_t
+ in a linked list starting at the 'dev' field of a struct pcmcia_device
structure. We allocate them in the card's private data structure,
because they generally shouldn't be allocated dynamically.
In this case, we also provide a flag to indicate if a device is
@@ -121,7 +121,7 @@ static void elsa_cs_detach(struct pcmcia_device *p_dev);
*/
typedef struct local_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
int busy;
int cardnr;
@@ -139,9 +139,8 @@ typedef struct local_info_t {
======================================================================*/
-static int elsa_cs_attach(struct pcmcia_device *p_dev)
+static int elsa_cs_probe(struct pcmcia_device *link)
{
- dev_link_t *link;
local_info_t *local;
DEBUG(0, "elsa_cs_attach()\n");
@@ -150,8 +149,11 @@ static int elsa_cs_attach(struct pcmcia_device *p_dev)
local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
if (!local) return -ENOMEM;
memset(local, 0, sizeof(local_info_t));
+
+ local->p_dev = link;
+ link->priv = local;
+
local->cardnr = -1;
- link = &local->link; link->priv = local;
/* Interrupt setup */
link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
@@ -170,16 +172,9 @@ static int elsa_cs_attach(struct pcmcia_device *p_dev)
link->io.IOAddrLines = 3;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- elsa_cs_config(link);
-
- return 0;
+ return elsa_cs_config(link);
} /* elsa_cs_attach */
/*======================================================================
@@ -191,20 +186,16 @@ static int elsa_cs_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void elsa_cs_detach(struct pcmcia_device *p_dev)
+static void elsa_cs_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
- local_info_t *info = link->priv;
+ local_info_t *info = link->priv;
- DEBUG(0, "elsa_cs_detach(0x%p)\n", link);
+ DEBUG(0, "elsa_cs_detach(0x%p)\n", link);
- if (link->state & DEV_CONFIG) {
- info->busy = 1;
- elsa_cs_release(link);
- }
-
- kfree(info);
+ info->busy = 1;
+ elsa_cs_release(link);
+ kfree(info);
} /* elsa_cs_detach */
/*======================================================================
@@ -214,7 +205,7 @@ static void elsa_cs_detach(struct pcmcia_device *p_dev)
device available to the system.
======================================================================*/
-static int get_tuple(client_handle_t handle, tuple_t *tuple,
+static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i = pcmcia_get_tuple_data(handle, tuple);
@@ -222,7 +213,7 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple,
return pcmcia_parse_tuple(handle, tuple, parse);
}
-static int first_tuple(client_handle_t handle, tuple_t *tuple,
+static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i = pcmcia_get_first_tuple(handle, tuple);
@@ -230,7 +221,7 @@ static int first_tuple(client_handle_t handle, tuple_t *tuple,
return get_tuple(handle, tuple, parse);
}
-static int next_tuple(client_handle_t handle, tuple_t *tuple,
+static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i = pcmcia_get_next_tuple(handle, tuple);
@@ -238,9 +229,8 @@ static int next_tuple(client_handle_t handle, tuple_t *tuple,
return get_tuple(handle, tuple, parse);
}
-static void elsa_cs_config(dev_link_t *link)
+static int elsa_cs_config(struct pcmcia_device *link)
{
- client_handle_t handle;
tuple_t tuple;
cisparse_t parse;
local_info_t *dev;
@@ -250,7 +240,6 @@ static void elsa_cs_config(dev_link_t *link)
IsdnCard_t icard;
DEBUG(0, "elsa_config(0x%p)\n", link);
- handle = link->handle;
dev = link->priv;
/*
@@ -262,7 +251,7 @@ static void elsa_cs_config(dev_link_t *link)
tuple.TupleDataMax = 255;
tuple.TupleOffset = 0;
tuple.Attributes = 0;
- i = first_tuple(handle, &tuple, &parse);
+ i = first_tuple(link, &tuple, &parse);
if (i != CS_SUCCESS) {
last_fn = ParseTuple;
goto cs_failed;
@@ -270,32 +259,29 @@ static void elsa_cs_config(dev_link_t *link)
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
-
tuple.TupleData = (cisdata_t *)buf;
tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- i = first_tuple(handle, &tuple, &parse);
+ i = first_tuple(link, &tuple, &parse);
while (i == CS_SUCCESS) {
if ( (cf->io.nwin > 0) && cf->io.win[0].base) {
printk(KERN_INFO "(elsa_cs: looks like the 96 model)\n");
link->conf.ConfigIndex = cf->index;
link->io.BasePort1 = cf->io.win[0].base;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) break;
} else {
printk(KERN_INFO "(elsa_cs: looks like the 97 model)\n");
link->conf.ConfigIndex = cf->index;
for (i = 0, j = 0x2f0; j > 0x100; j -= 0x10) {
link->io.BasePort1 = j;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) break;
}
break;
}
- i = next_tuple(handle, &tuple, &parse);
+ i = next_tuple(link, &tuple, &parse);
}
if (i != CS_SUCCESS) {
@@ -303,14 +289,14 @@ static void elsa_cs_config(dev_link_t *link)
goto cs_failed;
}
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
if (i != CS_SUCCESS) {
link->irq.AssignedIRQ = 0;
last_fn = RequestIRQ;
goto cs_failed;
}
- i = pcmcia_request_configuration(link->handle, &link->conf);
+ i = pcmcia_request_configuration(link, &link->conf);
if (i != CS_SUCCESS) {
last_fn = RequestConfiguration;
goto cs_failed;
@@ -321,14 +307,11 @@ static void elsa_cs_config(dev_link_t *link)
sprintf(dev->node.dev_name, "elsa");
dev->node.major = dev->node.minor = 0x0;
- link->dev = &dev->node;
+ link->dev_node = &dev->node;
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d",
- dev->node.dev_name, link->conf.ConfigIndex,
- link->conf.Vcc/10, link->conf.Vcc%10);
- if (link->conf.Vpp1)
- printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
+ printk(KERN_INFO "%s: index 0x%02x: ",
+ dev->node.dev_name, link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq.AssignedIRQ);
if (link->io.NumPorts1)
@@ -339,8 +322,6 @@ static void elsa_cs_config(dev_link_t *link)
link->io.BasePort2+link->io.NumPorts2-1);
printk("\n");
- link->state &= ~DEV_CONFIG_PENDING;
-
icard.para[0] = link->irq.AssignedIRQ;
icard.para[1] = link->io.BasePort1;
icard.protocol = protocol;
@@ -354,10 +335,11 @@ static void elsa_cs_config(dev_link_t *link)
} else
((local_info_t*)link->priv)->cardnr = i;
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, i);
+ cs_error(link, last_fn, i);
elsa_cs_release(link);
+ return -ENODEV;
} /* elsa_cs_config */
/*======================================================================
@@ -368,7 +350,7 @@ cs_failed:
======================================================================*/
-static void elsa_cs_release(dev_link_t *link)
+static void elsa_cs_release(struct pcmcia_device *link)
{
local_info_t *local = link->priv;
@@ -380,39 +362,23 @@ static void elsa_cs_release(dev_link_t *link)
HiSax_closecard(local->cardnr);
}
}
- /* Unlink the device chain */
- link->dev = NULL;
-
- /* Don't bother checking to see if these succeed or not */
- if (link->win)
- pcmcia_release_window(link->win);
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
+
+ pcmcia_disable_device(link);
} /* elsa_cs_release */
-static int elsa_suspend(struct pcmcia_device *p_dev)
+static int elsa_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
local_info_t *dev = link->priv;
- link->state |= DEV_SUSPEND;
dev->busy = 1;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
return 0;
}
-static int elsa_resume(struct pcmcia_device *p_dev)
+static int elsa_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
local_info_t *dev = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_request_configuration(link->handle, &link->conf);
dev->busy = 0;
return 0;
@@ -430,7 +396,7 @@ static struct pcmcia_driver elsa_cs_driver = {
.drv = {
.name = "elsa_cs",
},
- .probe = elsa_cs_attach,
+ .probe = elsa_cs_probe,
.remove = elsa_cs_detach,
.id_table = elsa_ids,
.suspend = elsa_suspend,
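
elsa_cs shows the other half of the conversion: the suspend and resume callbacks no longer release and re-request the socket configuration or track DEV_SUSPEND/DEV_CONFIG, since socket handling is now left to the PCMCIA core around these calls; only driver-private bookkeeping remains. A hedged sketch of the resulting pair, with a made-up example_info standing in for local_info_t:

#include <pcmcia/ds.h>

struct example_info {
	int busy;			/* driver-private flag, as in local_info_t */
};

static int example_suspend(struct pcmcia_device *link)
{
	struct example_info *info = link->priv;

	info->busy = 1;			/* socket state is handled by the core */
	return 0;
}

static int example_resume(struct pcmcia_device *link)
{
	struct example_info *info = link->priv;

	info->busy = 0;
	return 0;
}
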
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 6f5213a18a8d6..9bb18f3f78298 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -95,8 +95,8 @@ module_param(protocol, int, 0);
event handler.
*/
-static void sedlbauer_config(dev_link_t *link);
-static void sedlbauer_release(dev_link_t *link);
+static int sedlbauer_config(struct pcmcia_device *link);
+static void sedlbauer_release(struct pcmcia_device *link);
/*
The attach() and detach() entry points are used to create and destroy
@@ -119,7 +119,7 @@ static void sedlbauer_detach(struct pcmcia_device *p_dev);
example, ethernet cards, modems). In other cases, there may be
many actual or logical devices (SCSI adapters, memory cards with
multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a dev_link_t
+ in a linked list starting at the 'dev' field of a struct pcmcia_device
structure. We allocate them in the card's private data structure,
because they generally shouldn't be allocated dynamically.
@@ -130,7 +130,7 @@ static void sedlbauer_detach(struct pcmcia_device *p_dev);
*/
typedef struct local_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
int stop;
int cardnr;
@@ -148,11 +148,10 @@ typedef struct local_info_t {
======================================================================*/
-static int sedlbauer_attach(struct pcmcia_device *p_dev)
+static int sedlbauer_probe(struct pcmcia_device *link)
{
local_info_t *local;
- dev_link_t *link;
-
+
DEBUG(0, "sedlbauer_attach()\n");
/* Allocate space for private device-specific data */
@@ -160,8 +159,10 @@ static int sedlbauer_attach(struct pcmcia_device *p_dev)
if (!local) return -ENOMEM;
memset(local, 0, sizeof(local_info_t));
local->cardnr = -1;
- link = &local->link; link->priv = local;
-
+
+ local->p_dev = link;
+ link->priv = local;
+
/* Interrupt setup */
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
@@ -182,18 +183,10 @@ static int sedlbauer_attach(struct pcmcia_device *p_dev)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.IOAddrLines = 3;
-
link->conf.Attributes = 0;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- sedlbauer_config(link);
-
- return 0;
+ return sedlbauer_config(link);
} /* sedlbauer_attach */
/*======================================================================
@@ -205,19 +198,15 @@ static int sedlbauer_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void sedlbauer_detach(struct pcmcia_device *p_dev)
+static void sedlbauer_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
- DEBUG(0, "sedlbauer_detach(0x%p)\n", link);
+ DEBUG(0, "sedlbauer_detach(0x%p)\n", link);
- if (link->state & DEV_CONFIG) {
- ((local_info_t *)link->priv)->stop = 1;
- sedlbauer_release(link);
- }
+ ((local_info_t *)link->priv)->stop = 1;
+ sedlbauer_release(link);
- /* This points to the parent local_info_t struct */
- kfree(link->priv);
+ /* This points to the parent local_info_t struct */
+ kfree(link->priv);
} /* sedlbauer_detach */
/*======================================================================
@@ -230,9 +219,8 @@ static void sedlbauer_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void sedlbauer_config(dev_link_t *link)
+static int sedlbauer_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
local_info_t *dev = link->priv;
tuple_t tuple;
cisparse_t parse;
@@ -254,18 +242,13 @@ static void sedlbauer_config(dev_link_t *link)
tuple.TupleData = buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
-
- /* Configure card */
- link->state |= DEV_CONFIG;
- /* Look up the current Vcc */
- CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
- link->conf.Vcc = conf.Vcc;
+ CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf));
/*
In this loop, we scan the CIS for configuration table entries,
@@ -280,12 +263,12 @@ static void sedlbauer_config(dev_link_t *link)
will only use the CIS to fill in implementation-defined details.
*/
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
cistpl_cftable_entry_t dflt = { 0 };
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
- if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
- pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+ if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
+ pcmcia_parse_tuple(link, &tuple, &parse) != 0)
goto next_entry;
if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
@@ -309,10 +292,10 @@ static void sedlbauer_config(dev_link_t *link)
}
if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
/* Do we need to allocate an interrupt? */
@@ -339,13 +322,13 @@ static void sedlbauer_config(dev_link_t *link)
link->io.NumPorts2 = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(link->handle, &link->io) != 0)
+ if (pcmcia_request_io(link, &link->io) != 0)
goto next_entry;
}
/*
Now set up a common memory window, if needed. There is room
- in the dev_link_t structure for one memory window handle,
+ in the struct pcmcia_device structure for one memory window handle,
but if the base addresses need to be saved, or if multiple
windows are needed, the info should go in the private data
structure for this device.
@@ -366,7 +349,7 @@ static void sedlbauer_config(dev_link_t *link)
req.Size = 0x1000;
*/
req.AccessSpeed = 0;
- if (pcmcia_request_window(&link->handle, &req, &link->win) != 0)
+ if (pcmcia_request_window(&link, &req, &link->win) != 0)
goto next_entry;
map.Page = 0; map.CardOffset = mem->win[0].card_addr;
if (pcmcia_map_mem_page(link->win, &map) != 0)
@@ -374,29 +357,25 @@ static void sedlbauer_config(dev_link_t *link)
}
/* If we got this far, we're cool! */
break;
-
+
next_entry:
-/* new in dummy.cs 2001/01/28 MN
- if (link->io.NumPorts1)
- pcmcia_release_io(link->handle, &link->io);
-*/
- CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
}
-
+
/*
Allocate an interrupt line. Note that this does not assign a
handler to the interrupt, unless the 'Handler' member of the
irq structure is initialized.
*/
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
/*
This actually configures the PCMCIA socket -- setting up
the I/O windows and the interrupt mapping, and putting the
card and host interface into "Memory and IO" mode.
*/
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
/*
At this point, the dev_node_t structure(s) need to be
@@ -404,14 +383,13 @@ static void sedlbauer_config(dev_link_t *link)
*/
sprintf(dev->node.dev_name, "sedlbauer");
dev->node.major = dev->node.minor = 0;
- link->dev = &dev->node;
+ link->dev_node = &dev->node;
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d",
- dev->node.dev_name, link->conf.ConfigIndex,
- link->conf.Vcc/10, link->conf.Vcc%10);
- if (link->conf.Vpp1)
- printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
+ printk(KERN_INFO "%s: index 0x%02x:",
+ dev->node.dev_name, link->conf.ConfigIndex);
+ if (link->conf.Vpp)
+ printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq.AssignedIRQ);
if (link->io.NumPorts1)
@@ -424,8 +402,6 @@ static void sedlbauer_config(dev_link_t *link)
printk(", mem 0x%06lx-0x%06lx", req.Base,
req.Base+req.Size-1);
printk("\n");
-
- link->state &= ~DEV_CONFIG_PENDING;
icard.para[0] = link->irq.AssignedIRQ;
icard.para[1] = link->io.BasePort1;
@@ -437,14 +413,16 @@ static void sedlbauer_config(dev_link_t *link)
printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d at i/o %#x\n",
last_ret, link->io.BasePort1);
sedlbauer_release(link);
+ return -ENODEV;
} else
((local_info_t*)link->priv)->cardnr = last_ret;
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
sedlbauer_release(link);
+ return -ENODEV;
} /* sedlbauer_config */
@@ -456,7 +434,7 @@ cs_failed:
======================================================================*/
-static void sedlbauer_release(dev_link_t *link)
+static void sedlbauer_release(struct pcmcia_device *link)
{
local_info_t *local = link->priv;
DEBUG(0, "sedlbauer_release(0x%p)\n", link);
@@ -467,46 +445,23 @@ static void sedlbauer_release(dev_link_t *link)
HiSax_closecard(local->cardnr);
}
}
- /* Unlink the device chain */
- link->dev = NULL;
- /*
- In a normal driver, additional code may be needed to release
- other kernel data structures associated with this device.
- */
-
- /* Don't bother checking to see if these succeed or not */
- if (link->win)
- pcmcia_release_window(link->win);
- pcmcia_release_configuration(link->handle);
- if (link->io.NumPorts1)
- pcmcia_release_io(link->handle, &link->io);
- if (link->irq.AssignedIRQ)
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
} /* sedlbauer_release */
-static int sedlbauer_suspend(struct pcmcia_device *p_dev)
+static int sedlbauer_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
local_info_t *dev = link->priv;
- link->state |= DEV_SUSPEND;
dev->stop = 1;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
return 0;
}
-static int sedlbauer_resume(struct pcmcia_device *p_dev)
+static int sedlbauer_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
local_info_t *dev = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_request_configuration(link->handle, &link->conf);
dev->stop = 0;
return 0;
@@ -530,7 +485,7 @@ static struct pcmcia_driver sedlbauer_driver = {
.drv = {
.name = "sedlbauer_cs",
},
- .probe = sedlbauer_attach,
+ .probe = sedlbauer_probe,
.remove = sedlbauer_detach,
.id_table = sedlbauer_ids,
.suspend = sedlbauer_suspend,
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index 4e5c14c7240e1..afcc2aeadb344 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -75,8 +75,8 @@ module_param(protocol, int, 0);
handler.
*/
-static void teles_cs_config(dev_link_t *link);
-static void teles_cs_release(dev_link_t *link);
+static int teles_cs_config(struct pcmcia_device *link);
+static void teles_cs_release(struct pcmcia_device *link);
/*
The attach() and detach() entry points are used to create and destroy
@@ -89,10 +89,10 @@ static void teles_detach(struct pcmcia_device *p_dev);
/*
A linked list of "instances" of the teles_cs device. Each actual
PCMCIA card corresponds to one device instance, and is described
- by one dev_link_t structure (defined in ds.h).
+ by one struct pcmcia_device structure (defined in ds.h).
You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of dev_link_t pointers, where minor
+ memory card driver uses an array of struct pcmcia_device pointers, where minor
device numbers are used to derive the corresponding array index.
*/
@@ -102,7 +102,7 @@ static void teles_detach(struct pcmcia_device *p_dev);
example, ethernet cards, modems). In other cases, there may be
many actual or logical devices (SCSI adapters, memory cards with
multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a dev_link_t
+ in a linked list starting at the 'dev' field of a struct pcmcia_device
structure. We allocate them in the card's private data structure,
because they generally shouldn't be allocated dynamically.
In this case, we also provide a flag to indicate if a device is
@@ -112,7 +112,7 @@ static void teles_detach(struct pcmcia_device *p_dev);
*/
typedef struct local_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
int busy;
int cardnr;
@@ -130,9 +130,8 @@ typedef struct local_info_t {
======================================================================*/
-static int teles_attach(struct pcmcia_device *p_dev)
+static int teles_probe(struct pcmcia_device *link)
{
- dev_link_t *link;
local_info_t *local;
DEBUG(0, "teles_attach()\n");
@@ -142,7 +141,9 @@ static int teles_attach(struct pcmcia_device *p_dev)
if (!local) return -ENOMEM;
memset(local, 0, sizeof(local_info_t));
local->cardnr = -1;
- link = &local->link; link->priv = local;
+
+ local->p_dev = link;
+ link->priv = local;
/* Interrupt setup */
link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
@@ -161,16 +162,9 @@ static int teles_attach(struct pcmcia_device *p_dev)
link->io.IOAddrLines = 5;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- teles_cs_config(link);
-
- return 0;
+ return teles_cs_config(link);
} /* teles_attach */
/*======================================================================
@@ -182,20 +176,16 @@ static int teles_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void teles_detach(struct pcmcia_device *p_dev)
+static void teles_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
- local_info_t *info = link->priv;
-
- DEBUG(0, "teles_detach(0x%p)\n", link);
+ local_info_t *info = link->priv;
- if (link->state & DEV_CONFIG) {
- info->busy = 1;
- teles_cs_release(link);
- }
+ DEBUG(0, "teles_detach(0x%p)\n", link);
- kfree(info);
+ info->busy = 1;
+ teles_cs_release(link);
+ kfree(info);
} /* teles_detach */
/*======================================================================
@@ -205,7 +195,7 @@ static void teles_detach(struct pcmcia_device *p_dev)
device available to the system.
======================================================================*/
-static int get_tuple(client_handle_t handle, tuple_t *tuple,
+static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i = pcmcia_get_tuple_data(handle, tuple);
@@ -213,7 +203,7 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple,
return pcmcia_parse_tuple(handle, tuple, parse);
}
-static int first_tuple(client_handle_t handle, tuple_t *tuple,
+static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i = pcmcia_get_first_tuple(handle, tuple);
@@ -221,7 +211,7 @@ static int first_tuple(client_handle_t handle, tuple_t *tuple,
return get_tuple(handle, tuple, parse);
}
-static int next_tuple(client_handle_t handle, tuple_t *tuple,
+static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i = pcmcia_get_next_tuple(handle, tuple);
@@ -229,9 +219,8 @@ static int next_tuple(client_handle_t handle, tuple_t *tuple,
return get_tuple(handle, tuple, parse);
}
-static void teles_cs_config(dev_link_t *link)
+static int teles_cs_config(struct pcmcia_device *link)
{
- client_handle_t handle;
tuple_t tuple;
cisparse_t parse;
local_info_t *dev;
@@ -241,7 +230,6 @@ static void teles_cs_config(dev_link_t *link)
IsdnCard_t icard;
DEBUG(0, "teles_config(0x%p)\n", link);
- handle = link->handle;
dev = link->priv;
/*
@@ -253,7 +241,7 @@ static void teles_cs_config(dev_link_t *link)
tuple.TupleDataMax = 255;
tuple.TupleOffset = 0;
tuple.Attributes = 0;
- i = first_tuple(handle, &tuple, &parse);
+ i = first_tuple(link, &tuple, &parse);
if (i != CS_SUCCESS) {
last_fn = ParseTuple;
goto cs_failed;
@@ -261,32 +249,29 @@ static void teles_cs_config(dev_link_t *link)
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
-
tuple.TupleData = (cisdata_t *)buf;
tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- i = first_tuple(handle, &tuple, &parse);
+ i = first_tuple(link, &tuple, &parse);
while (i == CS_SUCCESS) {
if ( (cf->io.nwin > 0) && cf->io.win[0].base) {
printk(KERN_INFO "(teles_cs: looks like the 96 model)\n");
link->conf.ConfigIndex = cf->index;
link->io.BasePort1 = cf->io.win[0].base;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) break;
} else {
printk(KERN_INFO "(teles_cs: looks like the 97 model)\n");
link->conf.ConfigIndex = cf->index;
for (i = 0, j = 0x2f0; j > 0x100; j -= 0x10) {
link->io.BasePort1 = j;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) break;
}
break;
}
- i = next_tuple(handle, &tuple, &parse);
+ i = next_tuple(link, &tuple, &parse);
}
if (i != CS_SUCCESS) {
@@ -294,14 +279,14 @@ static void teles_cs_config(dev_link_t *link)
goto cs_failed;
}
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
if (i != CS_SUCCESS) {
link->irq.AssignedIRQ = 0;
last_fn = RequestIRQ;
goto cs_failed;
}
- i = pcmcia_request_configuration(link->handle, &link->conf);
+ i = pcmcia_request_configuration(link, &link->conf);
if (i != CS_SUCCESS) {
last_fn = RequestConfiguration;
goto cs_failed;
@@ -312,14 +297,11 @@ static void teles_cs_config(dev_link_t *link)
sprintf(dev->node.dev_name, "teles");
dev->node.major = dev->node.minor = 0x0;
- link->dev = &dev->node;
+ link->dev_node = &dev->node;
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d",
- dev->node.dev_name, link->conf.ConfigIndex,
- link->conf.Vcc/10, link->conf.Vcc%10);
- if (link->conf.Vpp1)
- printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
+ printk(KERN_INFO "%s: index 0x%02x:",
+ dev->node.dev_name, link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq.AssignedIRQ);
if (link->io.NumPorts1)
@@ -330,8 +312,6 @@ static void teles_cs_config(dev_link_t *link)
link->io.BasePort2+link->io.NumPorts2-1);
printk("\n");
- link->state &= ~DEV_CONFIG_PENDING;
-
icard.para[0] = link->irq.AssignedIRQ;
icard.para[1] = link->io.BasePort1;
icard.protocol = protocol;
@@ -342,13 +322,16 @@ static void teles_cs_config(dev_link_t *link)
printk(KERN_ERR "teles_cs: failed to initialize Teles PCMCIA %d at i/o %#x\n",
i, link->io.BasePort1);
teles_cs_release(link);
- } else
- ((local_info_t*)link->priv)->cardnr = i;
+ return -ENODEV;
+ }
+
+ ((local_info_t*)link->priv)->cardnr = i;
+ return 0;
- return;
cs_failed:
- cs_error(link->handle, last_fn, i);
+ cs_error(link, last_fn, i);
teles_cs_release(link);
+ return -ENODEV;
} /* teles_cs_config */
/*======================================================================
@@ -359,7 +342,7 @@ cs_failed:
======================================================================*/
-static void teles_cs_release(dev_link_t *link)
+static void teles_cs_release(struct pcmcia_device *link)
{
local_info_t *local = link->priv;
@@ -371,39 +354,23 @@ static void teles_cs_release(dev_link_t *link)
HiSax_closecard(local->cardnr);
}
}
- /* Unlink the device chain */
- link->dev = NULL;
-
- /* Don't bother checking to see if these succeed or not */
- if (link->win)
- pcmcia_release_window(link->win);
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
+
+ pcmcia_disable_device(link);
} /* teles_cs_release */
-static int teles_suspend(struct pcmcia_device *p_dev)
+static int teles_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
local_info_t *dev = link->priv;
- link->state |= DEV_SUSPEND;
dev->busy = 1;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
return 0;
}
-static int teles_resume(struct pcmcia_device *p_dev)
+static int teles_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
local_info_t *dev = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_request_configuration(link->handle, &link->conf);
dev->busy = 0;
return 0;
@@ -421,7 +388,7 @@ static struct pcmcia_driver teles_cs_driver = {
.drv = {
.name = "teles_cs",
},
- .probe = teles_attach,
+ .probe = teles_probe,
.remove = teles_detach,
.id_table = teles_ids,
.suspend = teles_suspend,
diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c
index 94c9afb7017c6..f4f71226a0789 100644
--- a/drivers/isdn/sc/ioctl.c
+++ b/drivers/isdn/sc/ioctl.c
@@ -46,7 +46,8 @@ int sc_ioctl(int card, scs_ioctl *data)
pr_debug("%s: SCIOCRESET: ioctl received\n",
sc_adapter[card]->devicename);
sc_adapter[card]->StartOnReset = 0;
- return (reset(card));
+ kfree(rcvmsg);
+ return reset(card);
}
case SCIOCLOAD:
@@ -183,7 +184,7 @@ int sc_ioctl(int card, scs_ioctl *data)
sc_adapter[card]->devicename);
spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
- if(!spid) {
+ if (!spid) {
kfree(rcvmsg);
return -ENOMEM;
}
@@ -195,10 +196,10 @@ int sc_ioctl(int card, scs_ioctl *data)
if (!status) {
pr_debug("%s: SCIOCGETSPID: command successful\n",
sc_adapter[card]->devicename);
- }
- else {
+ } else {
pr_debug("%s: SCIOCGETSPID: command failed (status = %d)\n",
sc_adapter[card]->devicename, status);
+ kfree(spid);
kfree(rcvmsg);
return status;
}
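
Both sc_ioctl() hunks above are error-path fixes: rcvmsg (and, in the SPID case, spid) must be freed on every early return, not only on the success path. A minimal sketch of that allocate/use/free discipline, with hypothetical names (the buffers and do_command() are not from this driver):

    #include <linux/slab.h>
    #include <linux/errno.h>

    static int do_command(void *req, void *reply);      /* hypothetical helper */

    static int example_ioctl_helper(size_t len)
    {
            void *req, *reply;
            int status;

            req = kmalloc(len, GFP_KERNEL);
            if (!req)
                    return -ENOMEM;

            reply = kmalloc(len, GFP_KERNEL);
            if (!reply) {
                    kfree(req);             /* free what was already allocated */
                    return -ENOMEM;
            }

            status = do_command(req, reply);
            if (status) {
                    kfree(reply);           /* error path releases both buffers */
                    kfree(req);
                    return status;
            }

            kfree(reply);
            kfree(req);
            return 0;
    }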
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
new file mode 100644
index 0000000000000..2c4f20b7f0215
--- /dev/null
+++ b/drivers/leds/Kconfig
@@ -0,0 +1,77 @@
+
+menu "LED devices"
+
+config NEW_LEDS
+ bool "LED Support"
+ help
+ Say Y to enable Linux LED support. This is not related to standard
+ keyboard LEDs which are controlled via the input system.
+
+config LEDS_CLASS
+ tristate "LED Class Support"
+ depends on NEW_LEDS
+ help
+ This option enables the led sysfs class in /sys/class/leds. You'll
+ need this to do anything useful with LEDs. If unsure, say N.
+
+config LEDS_TRIGGERS
+ bool "LED Trigger support"
+ depends on NEW_LEDS
+ help
+ This option enables trigger support for the leds class.
+ These triggers allow kernel events to drive the LEDs and can
+ be configured via sysfs. If unsure, say Y.
+
+config LEDS_CORGI
+ tristate "LED Support for the Sharp SL-C7x0 series"
+ depends on LEDS_CLASS && PXA_SHARP_C7xx
+ help
+ This option enables support for the LEDs on Sharp Zaurus
+ SL-C7x0 series (C700, C750, C760, C860).
+
+config LEDS_LOCOMO
+ tristate "LED Support for Locomo device"
+ depends on LEDS_CLASS && SHARP_LOCOMO
+ help
+ This option enables support for the LEDs on the Sharp Locomo,
+ as found on Zaurus models SL-5500 and SL-5600.
+
+config LEDS_SPITZ
+ tristate "LED Support for the Sharp SL-Cxx00 series"
+ depends on LEDS_CLASS && PXA_SHARP_Cxx00
+ help
+ This option enables support for the LEDs on Sharp Zaurus
+ SL-Cxx00 series (C1000, C3000, C3100).
+
+config LEDS_IXP4XX
+ tristate "LED Support for GPIO connected LEDs on IXP4XX processors"
+ depends on LEDS_CLASS && ARCH_IXP4XX
+ help
+ This option enables support for the LEDs connected to GPIO
+ outputs of the Intel IXP4XX processors. To be useful the
+ particular board must have LEDs and they must be connected
+ to the GPIO lines. If unsure, say Y.
+
+config LEDS_TOSA
+ tristate "LED Support for the Sharp SL-6000 series"
+ depends on LEDS_CLASS && PXA_SHARPSL
+ help
+ This option enables support for the LEDs on Sharp Zaurus
+ SL-6000 series.
+
+config LEDS_TRIGGER_TIMER
+ tristate "LED Timer Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by a programmable timer
+ via sysfs. If unsure, say Y.
+
+config LEDS_TRIGGER_IDE_DISK
+ bool "LED IDE Disk Trigger"
+ depends on LEDS_TRIGGERS && BLK_DEV_IDEDISK
+ help
+ This allows LEDs to be controlled by IDE disk activity.
+ If unsure, say Y.
+
+endmenu
+
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
new file mode 100644
index 0000000000000..40699d3cabbf3
--- /dev/null
+++ b/drivers/leds/Makefile
@@ -0,0 +1,16 @@
+
+# LED Core
+obj-$(CONFIG_NEW_LEDS) += led-core.o
+obj-$(CONFIG_LEDS_CLASS) += led-class.o
+obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
+
+# LED Platform Drivers
+obj-$(CONFIG_LEDS_CORGI) += leds-corgi.o
+obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
+obj-$(CONFIG_LEDS_SPITZ) += leds-spitz.o
+obj-$(CONFIG_LEDS_IXP4XX) += leds-ixp4xx-gpio.o
+obj-$(CONFIG_LEDS_TOSA) += leds-tosa.o
+
+# LED Triggers
+obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
+obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
new file mode 100644
index 0000000000000..b0b5d05fadd62
--- /dev/null
+++ b/drivers/leds/led-class.c
@@ -0,0 +1,167 @@
+/*
+ * LED Class Core
+ *
+ * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu>
+ * Copyright (C) 2005-2006 Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <linux/leds.h>
+#include "leds.h"
+
+static struct class *leds_class;
+
+static ssize_t led_brightness_show(struct class_device *dev, char *buf)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ ssize_t ret = 0;
+
+ /* no lock needed for this */
+ sprintf(buf, "%u\n", led_cdev->brightness);
+ ret = strlen(buf) + 1;
+
+ return ret;
+}
+
+static ssize_t led_brightness_store(struct class_device *dev,
+ const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ ssize_t ret = -EINVAL;
+ char *after;
+ unsigned long state = simple_strtoul(buf, &after, 10);
+
+ if (after - buf > 0) {
+ ret = after - buf;
+ led_set_brightness(led_cdev, state);
+ }
+
+ return ret;
+}
+
+static CLASS_DEVICE_ATTR(brightness, 0644, led_brightness_show,
+ led_brightness_store);
+#ifdef CONFIG_LEDS_TRIGGERS
+static CLASS_DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
+#endif
+
+/**
+ * led_classdev_suspend - suspend an led_classdev.
+ * @led_cdev: the led_classdev to suspend.
+ */
+void led_classdev_suspend(struct led_classdev *led_cdev)
+{
+ led_cdev->flags |= LED_SUSPENDED;
+ led_cdev->brightness_set(led_cdev, 0);
+}
+EXPORT_SYMBOL_GPL(led_classdev_suspend);
+
+/**
+ * led_classdev_resume - resume an led_classdev.
+ * @led_cdev: the led_classdev to resume.
+ */
+void led_classdev_resume(struct led_classdev *led_cdev)
+{
+ led_cdev->brightness_set(led_cdev, led_cdev->brightness);
+ led_cdev->flags &= ~LED_SUSPENDED;
+}
+EXPORT_SYMBOL_GPL(led_classdev_resume);
+
+/**
+ * led_classdev_register - register a new object of the led_classdev class.
+ * @parent: the parent device this LED is attached to.
+ * @led_cdev: the led_classdev structure for this device.
+ */
+int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
+{
+ led_cdev->class_dev = class_device_create(leds_class, NULL, 0,
+ parent, "%s", led_cdev->name);
+ if (unlikely(IS_ERR(led_cdev->class_dev)))
+ return PTR_ERR(led_cdev->class_dev);
+
+ class_set_devdata(led_cdev->class_dev, led_cdev);
+
+ /* register the attributes */
+ class_device_create_file(led_cdev->class_dev,
+ &class_device_attr_brightness);
+
+ /* add to the list of leds */
+ write_lock(&leds_list_lock);
+ list_add_tail(&led_cdev->node, &leds_list);
+ write_unlock(&leds_list_lock);
+
+#ifdef CONFIG_LEDS_TRIGGERS
+ rwlock_init(&led_cdev->trigger_lock);
+
+ led_trigger_set_default(led_cdev);
+
+ class_device_create_file(led_cdev->class_dev,
+ &class_device_attr_trigger);
+#endif
+
+ printk(KERN_INFO "Registered led device: %s\n",
+ led_cdev->class_dev->class_id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(led_classdev_register);
+
+/**
+ * led_classdev_unregister - unregister an object of the led_classdev class.
+ * @led_cdev: the led device to unregister
+ *
+ * Unregisters an object previously registered with led_classdev_register().
+ */
+void led_classdev_unregister(struct led_classdev *led_cdev)
+{
+ class_device_remove_file(led_cdev->class_dev,
+ &class_device_attr_brightness);
+#ifdef CONFIG_LEDS_TRIGGERS
+ class_device_remove_file(led_cdev->class_dev,
+ &class_device_attr_trigger);
+ write_lock(&led_cdev->trigger_lock);
+ if (led_cdev->trigger)
+ led_trigger_set(led_cdev, NULL);
+ write_unlock(&led_cdev->trigger_lock);
+#endif
+
+ class_device_unregister(led_cdev->class_dev);
+
+ write_lock(&leds_list_lock);
+ list_del(&led_cdev->node);
+ write_unlock(&leds_list_lock);
+}
+EXPORT_SYMBOL_GPL(led_classdev_unregister);
+
+static int __init leds_init(void)
+{
+ leds_class = class_create(THIS_MODULE, "leds");
+ if (IS_ERR(leds_class))
+ return PTR_ERR(leds_class);
+ return 0;
+}
+
+static void __exit leds_exit(void)
+{
+ class_destroy(leds_class);
+}
+
+subsys_initcall(leds_init);
+module_exit(leds_exit);
+
+MODULE_AUTHOR("John Lenz, Richard Purdie");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LED Class Interface");
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
new file mode 100644
index 0000000000000..fe6541326c717
--- /dev/null
+++ b/drivers/leds/led-core.c
@@ -0,0 +1,25 @@
+/*
+ * LED Class Core
+ *
+ * Copyright 2005-2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/leds.h>
+#include "leds.h"
+
+rwlock_t leds_list_lock = RW_LOCK_UNLOCKED;
+LIST_HEAD(leds_list);
+
+EXPORT_SYMBOL_GPL(leds_list);
+EXPORT_SYMBOL_GPL(leds_list_lock);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
new file mode 100644
index 0000000000000..5e2cd8be1191b
--- /dev/null
+++ b/drivers/leds/led-triggers.c
@@ -0,0 +1,239 @@
+/*
+ * LED Triggers Core
+ *
+ * Copyright 2005-2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/timer.h>
+#include <linux/leds.h>
+#include "leds.h"
+
+/*
+ * Nests outside led_cdev->trigger_lock
+ */
+static rwlock_t triggers_list_lock = RW_LOCK_UNLOCKED;
+static LIST_HEAD(trigger_list);
+
+ssize_t led_trigger_store(struct class_device *dev, const char *buf,
+ size_t count)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ char trigger_name[TRIG_NAME_MAX];
+ struct led_trigger *trig;
+ size_t len;
+
+ trigger_name[sizeof(trigger_name) - 1] = '\0';
+ strncpy(trigger_name, buf, sizeof(trigger_name) - 1);
+ len = strlen(trigger_name);
+
+ if (len && trigger_name[len - 1] == '\n')
+ trigger_name[len - 1] = '\0';
+
+ if (!strcmp(trigger_name, "none")) {
+ write_lock(&led_cdev->trigger_lock);
+ led_trigger_set(led_cdev, NULL);
+ write_unlock(&led_cdev->trigger_lock);
+ return count;
+ }
+
+ read_lock(&triggers_list_lock);
+ list_for_each_entry(trig, &trigger_list, next_trig) {
+ if (!strcmp(trigger_name, trig->name)) {
+ write_lock(&led_cdev->trigger_lock);
+ led_trigger_set(led_cdev, trig);
+ write_unlock(&led_cdev->trigger_lock);
+
+ read_unlock(&triggers_list_lock);
+ return count;
+ }
+ }
+ read_unlock(&triggers_list_lock);
+
+ return -EINVAL;
+}
+
+
+ssize_t led_trigger_show(struct class_device *dev, char *buf)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct led_trigger *trig;
+ int len = 0;
+
+ read_lock(&triggers_list_lock);
+ read_lock(&led_cdev->trigger_lock);
+
+ if (!led_cdev->trigger)
+ len += sprintf(buf+len, "[none] ");
+ else
+ len += sprintf(buf+len, "none ");
+
+ list_for_each_entry(trig, &trigger_list, next_trig) {
+ if (led_cdev->trigger && !strcmp(led_cdev->trigger->name,
+ trig->name))
+ len += sprintf(buf+len, "[%s] ", trig->name);
+ else
+ len += sprintf(buf+len, "%s ", trig->name);
+ }
+ read_unlock(&led_cdev->trigger_lock);
+ read_unlock(&triggers_list_lock);
+
+ len += sprintf(len+buf, "\n");
+ return len;
+}
+
+void led_trigger_event(struct led_trigger *trigger,
+ enum led_brightness brightness)
+{
+ struct list_head *entry;
+
+ if (!trigger)
+ return;
+
+ read_lock(&trigger->leddev_list_lock);
+ list_for_each(entry, &trigger->led_cdevs) {
+ struct led_classdev *led_cdev;
+
+ led_cdev = list_entry(entry, struct led_classdev, trig_list);
+ led_set_brightness(led_cdev, brightness);
+ }
+ read_unlock(&trigger->leddev_list_lock);
+}
+
+/* Caller must ensure led_cdev->trigger_lock held */
+void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
+{
+ unsigned long flags;
+
+ /* Remove any existing trigger */
+ if (led_cdev->trigger) {
+ write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
+ list_del(&led_cdev->trig_list);
+ write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
+ if (led_cdev->trigger->deactivate)
+ led_cdev->trigger->deactivate(led_cdev);
+ }
+ if (trigger) {
+ write_lock_irqsave(&trigger->leddev_list_lock, flags);
+ list_add_tail(&led_cdev->trig_list, &trigger->led_cdevs);
+ write_unlock_irqrestore(&trigger->leddev_list_lock, flags);
+ if (trigger->activate)
+ trigger->activate(led_cdev);
+ }
+ led_cdev->trigger = trigger;
+}
+
+void led_trigger_set_default(struct led_classdev *led_cdev)
+{
+ struct led_trigger *trig;
+
+ if (!led_cdev->default_trigger)
+ return;
+
+ read_lock(&triggers_list_lock);
+ write_lock(&led_cdev->trigger_lock);
+ list_for_each_entry(trig, &trigger_list, next_trig) {
+ if (!strcmp(led_cdev->default_trigger, trig->name))
+ led_trigger_set(led_cdev, trig);
+ }
+ write_unlock(&led_cdev->trigger_lock);
+ read_unlock(&triggers_list_lock);
+}
+
+int led_trigger_register(struct led_trigger *trigger)
+{
+ struct led_classdev *led_cdev;
+
+ rwlock_init(&trigger->leddev_list_lock);
+ INIT_LIST_HEAD(&trigger->led_cdevs);
+
+ /* Add to the list of led triggers */
+ write_lock(&triggers_list_lock);
+ list_add_tail(&trigger->next_trig, &trigger_list);
+ write_unlock(&triggers_list_lock);
+
+ /* Register with any LEDs that have this as a default trigger */
+ read_lock(&leds_list_lock);
+ list_for_each_entry(led_cdev, &leds_list, node) {
+ write_lock(&led_cdev->trigger_lock);
+ if (!led_cdev->trigger && led_cdev->default_trigger &&
+ !strcmp(led_cdev->default_trigger, trigger->name))
+ led_trigger_set(led_cdev, trigger);
+ write_unlock(&led_cdev->trigger_lock);
+ }
+ read_unlock(&leds_list_lock);
+
+ return 0;
+}
+
+void led_trigger_register_simple(const char *name, struct led_trigger **tp)
+{
+ struct led_trigger *trigger;
+
+ trigger = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
+
+ if (trigger) {
+ trigger->name = name;
+ led_trigger_register(trigger);
+ }
+ *tp = trigger;
+}
+
+void led_trigger_unregister(struct led_trigger *trigger)
+{
+ struct led_classdev *led_cdev;
+
+ /* Remove from the list of led triggers */
+ write_lock(&triggers_list_lock);
+ list_del(&trigger->next_trig);
+ write_unlock(&triggers_list_lock);
+
+ /* Remove anyone actively using this trigger */
+ read_lock(&leds_list_lock);
+ list_for_each_entry(led_cdev, &leds_list, node) {
+ write_lock(&led_cdev->trigger_lock);
+ if (led_cdev->trigger == trigger)
+ led_trigger_set(led_cdev, NULL);
+ write_unlock(&led_cdev->trigger_lock);
+ }
+ read_unlock(&leds_list_lock);
+}
+
+void led_trigger_unregister_simple(struct led_trigger *trigger)
+{
+ led_trigger_unregister(trigger);
+ kfree(trigger);
+}
+
+/* Used by LED Class */
+EXPORT_SYMBOL_GPL(led_trigger_set);
+EXPORT_SYMBOL_GPL(led_trigger_set_default);
+EXPORT_SYMBOL_GPL(led_trigger_show);
+EXPORT_SYMBOL_GPL(led_trigger_store);
+
+/* LED Trigger Interface */
+EXPORT_SYMBOL_GPL(led_trigger_register);
+EXPORT_SYMBOL_GPL(led_trigger_unregister);
+
+/* Simple LED Trigger Interface */
+EXPORT_SYMBOL_GPL(led_trigger_register_simple);
+EXPORT_SYMBOL_GPL(led_trigger_unregister_simple);
+EXPORT_SYMBOL_GPL(led_trigger_event);
+
+MODULE_AUTHOR("Richard Purdie");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LED Triggers Core");
diff --git a/drivers/leds/leds-corgi.c b/drivers/leds/leds-corgi.c
new file mode 100644
index 0000000000000..bb7d84df0121d
--- /dev/null
+++ b/drivers/leds/leds-corgi.c
@@ -0,0 +1,121 @@
+/*
+ * Corgi LED Driver
+ *
+ * Copyright 2005-2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <asm/mach-types.h>
+#include <asm/arch/corgi.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/pxa-regs.h>
+#include <asm/hardware/scoop.h>
+
+static void corgiled_amber_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ if (value)
+ GPSR0 = GPIO_bit(CORGI_GPIO_LED_ORANGE);
+ else
+ GPCR0 = GPIO_bit(CORGI_GPIO_LED_ORANGE);
+}
+
+static void corgiled_green_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ if (value)
+ set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_LED_GREEN);
+ else
+ reset_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_LED_GREEN);
+}
+
+static struct led_classdev corgi_amber_led = {
+ .name = "corgi:amber",
+ .default_trigger = "sharpsl-charge",
+ .brightness_set = corgiled_amber_set,
+};
+
+static struct led_classdev corgi_green_led = {
+ .name = "corgi:green",
+ .default_trigger = "nand-disk",
+ .brightness_set = corgiled_green_set,
+};
+
+#ifdef CONFIG_PM
+static int corgiled_suspend(struct platform_device *dev, pm_message_t state)
+{
+#ifdef CONFIG_LEDS_TRIGGERS
+ if (corgi_amber_led.trigger && strcmp(corgi_amber_led.trigger->name, "sharpsl-charge"))
+#endif
+ led_classdev_suspend(&corgi_amber_led);
+ led_classdev_suspend(&corgi_green_led);
+ return 0;
+}
+
+static int corgiled_resume(struct platform_device *dev)
+{
+ led_classdev_resume(&corgi_amber_led);
+ led_classdev_resume(&corgi_green_led);
+ return 0;
+}
+#endif
+
+static int corgiled_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = led_classdev_register(&pdev->dev, &corgi_amber_led);
+ if (ret < 0)
+ return ret;
+
+ ret = led_classdev_register(&pdev->dev, &corgi_green_led);
+ if (ret < 0)
+ led_classdev_unregister(&corgi_amber_led);
+
+ return ret;
+}
+
+static int corgiled_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&corgi_amber_led);
+ led_classdev_unregister(&corgi_green_led);
+ return 0;
+}
+
+static struct platform_driver corgiled_driver = {
+ .probe = corgiled_probe,
+ .remove = corgiled_remove,
+#ifdef CONFIG_PM
+ .suspend = corgiled_suspend,
+ .resume = corgiled_resume,
+#endif
+ .driver = {
+ .name = "corgi-led",
+ },
+};
+
+static int __init corgiled_init(void)
+{
+ return platform_driver_register(&corgiled_driver);
+}
+
+static void __exit corgiled_exit(void)
+{
+ platform_driver_unregister(&corgiled_driver);
+}
+
+module_init(corgiled_init);
+module_exit(corgiled_exit);
+
+MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
+MODULE_DESCRIPTION("Corgi LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-ixp4xx-gpio.c b/drivers/leds/leds-ixp4xx-gpio.c
new file mode 100644
index 0000000000000..30ced150e4cfb
--- /dev/null
+++ b/drivers/leds/leds-ixp4xx-gpio.c
@@ -0,0 +1,215 @@
+/*
+ * IXP4XX GPIO LED driver
+ *
+ * Author: John Bowler <jbowler@acm.org>
+ *
+ * Copyright (c) 2006 John Bowler
+ *
+ * Permission is hereby granted, free of charge, to any
+ * person obtaining a copy of this software and associated
+ * documentation files (the "Software"), to deal in the
+ * Software without restriction, including without
+ * limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the
+ * following conditions:
+ *
+ * The above copyright notice and this permission notice
+ * shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ * ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+ * TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+ * SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/leds.h>
+#include <asm/arch/hardware.h>
+
+extern spinlock_t gpio_lock;
+
+/* Up to 16 gpio lines are possible. */
+#define GPIO_MAX 16
+static struct ixp4xxgpioled_device {
+ struct led_classdev ancestor;
+ int flags;
+} ixp4xxgpioled_devices[GPIO_MAX];
+
+void ixp4xxgpioled_brightness_set(struct led_classdev *pled,
+ enum led_brightness value)
+{
+ const struct ixp4xxgpioled_device *const ixp4xx_dev =
+ container_of(pled, struct ixp4xxgpioled_device, ancestor);
+ const u32 gpio_pin = ixp4xx_dev - ixp4xxgpioled_devices;
+
+ if (gpio_pin < GPIO_MAX && ixp4xx_dev->ancestor.name != 0) {
+ /* Set or clear the 'gpio_pin' bit according to the style
+ * and the required setting (value > 0 == on)
+ */
+ const int gpio_value =
+ (value > 0) == (ixp4xx_dev->flags != IXP4XX_GPIO_LOW) ?
+ IXP4XX_GPIO_HIGH : IXP4XX_GPIO_LOW;
+
+ {
+ unsigned long flags;
+ spin_lock_irqsave(&gpio_lock, flags);
+ gpio_line_set(gpio_pin, gpio_value);
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ }
+ }
+}
+
+/* LEDs are described in resources; the following macro iterates over the
+ * valid LED resources.
+ */
+#define for_all_leds(i, pdev) \
+ for (i=0; i<pdev->num_resources; ++i) \
+ if (pdev->resource[i].start < GPIO_MAX && \
+ pdev->resource[i].name != 0)
+
+/* The following applies 'operation' to each LED from the given platform
+ * device; it always returns 0 to allow tail call elimination.
+ */
+static int apply_to_all_leds(struct platform_device *pdev,
+ void (*operation)(struct led_classdev *pled))
+{
+ int i;
+
+ for_all_leds(i, pdev)
+ operation(&ixp4xxgpioled_devices[pdev->resource[i].start].ancestor);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ixp4xxgpioled_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ return apply_to_all_leds(pdev, led_classdev_suspend);
+}
+
+static int ixp4xxgpioled_resume(struct platform_device *pdev)
+{
+ return apply_to_all_leds(pdev, led_classdev_resume);
+}
+#endif
+
+static void ixp4xxgpioled_remove_one_led(struct led_classdev *pled)
+{
+ led_classdev_unregister(pled);
+ pled->name = 0;
+}
+
+static int ixp4xxgpioled_remove(struct platform_device *pdev)
+{
+ return apply_to_all_leds(pdev, ixp4xxgpioled_remove_one_led);
+}
+
+static int ixp4xxgpioled_probe(struct platform_device *pdev)
+{
+ /* The board level has to tell the driver where the
+ * LEDs are connected - there is no way to find out
+ * electrically. It must also say whether the GPIO
+ * lines are active high or active low.
+ *
+ * To do this read the num_resources (the number of
+ * LEDs) and the struct resource (the data for each
+ * LED). The name comes from the resource, and it
+ * isn't copied.
+ */
+ int i;
+
+ for_all_leds(i, pdev) {
+ const u8 gpio_pin = pdev->resource[i].start;
+ int rc;
+
+ if (ixp4xxgpioled_devices[gpio_pin].ancestor.name == 0) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+ gpio_line_config(gpio_pin, IXP4XX_GPIO_OUT);
+ /* The config can, apparently, reset the state;
+ * I suspect the gpio line may be an input and
+ * the config may cause the line to be latched,
+ * so the setting depends on how the LED is
+ * connected to the line (which affects how it
+ * floats if not driven).
+ */
+ gpio_line_set(gpio_pin, IXP4XX_GPIO_HIGH);
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ ixp4xxgpioled_devices[gpio_pin].flags =
+ pdev->resource[i].flags & IORESOURCE_BITS;
+
+ ixp4xxgpioled_devices[gpio_pin].ancestor.name =
+ pdev->resource[i].name;
+
+ /* This is how a board manufacturer makes the LED
+ * come on on reset - the GPIO line will be high, so
+ * make the LED light when the line is low...
+ */
+ if (ixp4xxgpioled_devices[gpio_pin].flags != IXP4XX_GPIO_LOW)
+ ixp4xxgpioled_devices[gpio_pin].ancestor.brightness = 100;
+ else
+ ixp4xxgpioled_devices[gpio_pin].ancestor.brightness = 0;
+
+ ixp4xxgpioled_devices[gpio_pin].ancestor.flags = 0;
+
+ ixp4xxgpioled_devices[gpio_pin].ancestor.brightness_set =
+ ixp4xxgpioled_brightness_set;
+
+ ixp4xxgpioled_devices[gpio_pin].ancestor.default_trigger = 0;
+ }
+
+ rc = led_classdev_register(&pdev->dev,
+ &ixp4xxgpioled_devices[gpio_pin].ancestor);
+ if (rc < 0) {
+ ixp4xxgpioled_devices[gpio_pin].ancestor.name = 0;
+ ixp4xxgpioled_remove(pdev);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver ixp4xxgpioled_driver = {
+ .probe = ixp4xxgpioled_probe,
+ .remove = ixp4xxgpioled_remove,
+#ifdef CONFIG_PM
+ .suspend = ixp4xxgpioled_suspend,
+ .resume = ixp4xxgpioled_resume,
+#endif
+ .driver = {
+ .name = "IXP4XX-GPIO-LED",
+ },
+};
+
+static int __init ixp4xxgpioled_init(void)
+{
+ return platform_driver_register(&ixp4xxgpioled_driver);
+}
+
+static void __exit ixp4xxgpioled_exit(void)
+{
+ platform_driver_unregister(&ixp4xxgpioled_driver);
+}
+
+module_init(ixp4xxgpioled_init);
+module_exit(ixp4xxgpioled_exit);
+
+MODULE_AUTHOR("John Bowler <jbowler@acm.org>");
+MODULE_DESCRIPTION("IXP4XX GPIO LED driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/leds/leds-locomo.c b/drivers/leds/leds-locomo.c
new file mode 100644
index 0000000000000..749a86c2adb65
--- /dev/null
+++ b/drivers/leds/leds-locomo.c
@@ -0,0 +1,95 @@
+/*
+ * linux/drivers/leds/leds-locomo.c
+ *
+ * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/leds.h>
+
+#include <asm/hardware.h>
+#include <asm/hardware/locomo.h>
+
+static void locomoled_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value, int offset)
+{
+ struct locomo_dev *locomo_dev = LOCOMO_DEV(led_cdev->class_dev->dev);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (value)
+ locomo_writel(LOCOMO_LPT_TOFH, locomo_dev->mapbase + offset);
+ else
+ locomo_writel(LOCOMO_LPT_TOFL, locomo_dev->mapbase + offset);
+ local_irq_restore(flags);
+}
+
+static void locomoled_brightness_set0(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ locomoled_brightness_set(led_cdev, value, LOCOMO_LPT0);
+}
+
+static void locomoled_brightness_set1(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ locomoled_brightness_set(led_cdev, value, LOCOMO_LPT1);
+}
+
+static struct led_classdev locomo_led0 = {
+ .name = "locomo:amber",
+ .brightness_set = locomoled_brightness_set0,
+};
+
+static struct led_classdev locomo_led1 = {
+ .name = "locomo:green",
+ .brightness_set = locomoled_brightness_set1,
+};
+
+static int locomoled_probe(struct locomo_dev *ldev)
+{
+ int ret;
+
+ ret = led_classdev_register(&ldev->dev, &locomo_led0);
+ if (ret < 0)
+ return ret;
+
+ ret = led_classdev_register(&ldev->dev, &locomo_led1);
+ if (ret < 0)
+ led_classdev_unregister(&locomo_led0);
+
+ return ret;
+}
+
+static int locomoled_remove(struct locomo_dev *dev)
+{
+ led_classdev_unregister(&locomo_led0);
+ led_classdev_unregister(&locomo_led1);
+ return 0;
+}
+
+static struct locomo_driver locomoled_driver = {
+ .drv = {
+ .name = "locomoled"
+ },
+ .devid = LOCOMO_DEVID_LED,
+ .probe = locomoled_probe,
+ .remove = locomoled_remove,
+};
+
+static int __init locomoled_init(void)
+{
+ return locomo_driver_register(&locomoled_driver);
+}
+module_init(locomoled_init);
+
+MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
+MODULE_DESCRIPTION("Locomo LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-spitz.c b/drivers/leds/leds-spitz.c
new file mode 100644
index 0000000000000..65bbef4a5e091
--- /dev/null
+++ b/drivers/leds/leds-spitz.c
@@ -0,0 +1,125 @@
+/*
+ * Spitz LED Driver
+ *
+ * Copyright 2005-2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <asm/hardware/scoop.h>
+#include <asm/mach-types.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/pxa-regs.h>
+#include <asm/arch/spitz.h>
+
+static void spitzled_amber_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ if (value)
+ set_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_ORANGE);
+ else
+ reset_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_ORANGE);
+}
+
+static void spitzled_green_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ if (value)
+ set_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_GREEN);
+ else
+ reset_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_GREEN);
+}
+
+static struct led_classdev spitz_amber_led = {
+ .name = "spitz:amber",
+ .default_trigger = "sharpsl-charge",
+ .brightness_set = spitzled_amber_set,
+};
+
+static struct led_classdev spitz_green_led = {
+ .name = "spitz:green",
+ .default_trigger = "ide-disk",
+ .brightness_set = spitzled_green_set,
+};
+
+#ifdef CONFIG_PM
+static int spitzled_suspend(struct platform_device *dev, pm_message_t state)
+{
+#ifdef CONFIG_LEDS_TRIGGERS
+ if (spitz_amber_led.trigger && strcmp(spitz_amber_led.trigger->name, "sharpsl-charge"))
+#endif
+ led_classdev_suspend(&spitz_amber_led);
+ led_classdev_suspend(&spitz_green_led);
+ return 0;
+}
+
+static int spitzled_resume(struct platform_device *dev)
+{
+ led_classdev_resume(&spitz_amber_led);
+ led_classdev_resume(&spitz_green_led);
+ return 0;
+}
+#endif
+
+static int spitzled_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (machine_is_akita())
+ spitz_green_led.default_trigger = "nand-disk";
+
+ ret = led_classdev_register(&pdev->dev, &spitz_amber_led);
+ if (ret < 0)
+ return ret;
+
+ ret = led_classdev_register(&pdev->dev, &spitz_green_led);
+ if (ret < 0)
+ led_classdev_unregister(&spitz_amber_led);
+
+ return ret;
+}
+
+static int spitzled_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&spitz_amber_led);
+ led_classdev_unregister(&spitz_green_led);
+
+ return 0;
+}
+
+static struct platform_driver spitzled_driver = {
+ .probe = spitzled_probe,
+ .remove = spitzled_remove,
+#ifdef CONFIG_PM
+ .suspend = spitzled_suspend,
+ .resume = spitzled_resume,
+#endif
+ .driver = {
+ .name = "spitz-led",
+ },
+};
+
+static int __init spitzled_init(void)
+{
+ return platform_driver_register(&spitzled_driver);
+}
+
+static void __exit spitzled_exit(void)
+{
+ platform_driver_unregister(&spitzled_driver);
+}
+
+module_init(spitzled_init);
+module_exit(spitzled_exit);
+
+MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
+MODULE_DESCRIPTION("Spitz LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-tosa.c b/drivers/leds/leds-tosa.c
new file mode 100644
index 0000000000000..c9e8cc1ec4813
--- /dev/null
+++ b/drivers/leds/leds-tosa.c
@@ -0,0 +1,131 @@
+/*
+ * Tosa LED Driver
+ *
+ * Copyright 2005 Dirk Opfer
+ *
+ * Author: Dirk Opfer <Dirk@Opfer-Online.de>
+ * based on spitz.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <asm/hardware/scoop.h>
+#include <asm/mach-types.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/pxa-regs.h>
+#include <asm/arch/tosa.h>
+
+static void tosaled_amber_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value)
+ set_scoop_gpio(&tosascoop_jc_device.dev,
+ TOSA_SCOOP_JC_CHRG_ERR_LED);
+ else
+ reset_scoop_gpio(&tosascoop_jc_device.dev,
+ TOSA_SCOOP_JC_CHRG_ERR_LED);
+}
+
+static void tosaled_green_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value)
+ set_scoop_gpio(&tosascoop_jc_device.dev,
+ TOSA_SCOOP_JC_NOTE_LED);
+ else
+ reset_scoop_gpio(&tosascoop_jc_device.dev,
+ TOSA_SCOOP_JC_NOTE_LED);
+}
+
+static struct led_classdev tosa_amber_led = {
+ .name = "tosa:amber",
+ .default_trigger = "sharpsl-charge",
+ .brightness_set = tosaled_amber_set,
+};
+
+static struct led_classdev tosa_green_led = {
+ .name = "tosa:green",
+ .default_trigger = "nand-disk",
+ .brightness_set = tosaled_green_set,
+};
+
+#ifdef CONFIG_PM
+static int tosaled_suspend(struct platform_device *dev, pm_message_t state)
+{
+#ifdef CONFIG_LEDS_TRIGGERS
+ if (tosa_amber_led.trigger && strcmp(tosa_amber_led.trigger->name,
+ "sharpsl-charge"))
+#endif
+ led_classdev_suspend(&tosa_amber_led);
+ led_classdev_suspend(&tosa_green_led);
+ return 0;
+}
+
+static int tosaled_resume(struct platform_device *dev)
+{
+ led_classdev_resume(&tosa_amber_led);
+ led_classdev_resume(&tosa_green_led);
+ return 0;
+}
+#else
+#define tosaled_suspend NULL
+#define tosaled_resume NULL
+#endif
+
+static int tosaled_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = led_classdev_register(&pdev->dev, &tosa_amber_led);
+ if (ret < 0)
+ return ret;
+
+ ret = led_classdev_register(&pdev->dev, &tosa_green_led);
+ if (ret < 0)
+ led_classdev_unregister(&tosa_amber_led);
+
+ return ret;
+}
+
+static int tosaled_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&tosa_amber_led);
+ led_classdev_unregister(&tosa_green_led);
+
+ return 0;
+}
+
+static struct platform_driver tosaled_driver = {
+ .probe = tosaled_probe,
+ .remove = tosaled_remove,
+ .suspend = tosaled_suspend,
+ .resume = tosaled_resume,
+ .driver = {
+ .name = "tosa-led",
+ },
+};
+
+static int __init tosaled_init(void)
+{
+ return platform_driver_register(&tosaled_driver);
+}
+
+static void __exit tosaled_exit(void)
+{
+ platform_driver_unregister(&tosaled_driver);
+}
+
+module_init(tosaled_init);
+module_exit(tosaled_exit);
+
+MODULE_AUTHOR("Dirk Opfer <Dirk@Opfer-Online.de>");
+MODULE_DESCRIPTION("Tosa LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
new file mode 100644
index 0000000000000..a715c4ed93ff0
--- /dev/null
+++ b/drivers/leds/leds.h
@@ -0,0 +1,44 @@
+/*
+ * LED Core
+ *
+ * Copyright 2005 Openedhand Ltd.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __LEDS_H_INCLUDED
+#define __LEDS_H_INCLUDED
+
+#include <linux/leds.h>
+
+static inline void led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value > LED_FULL)
+ value = LED_FULL;
+ led_cdev->brightness = value;
+ if (!(led_cdev->flags & LED_SUSPENDED))
+ led_cdev->brightness_set(led_cdev, value);
+}
+
+extern rwlock_t leds_list_lock;
+extern struct list_head leds_list;
+
+#ifdef CONFIG_LEDS_TRIGGERS
+void led_trigger_set_default(struct led_classdev *led_cdev);
+void led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger);
+#else
+#define led_trigger_set_default(x) do {} while(0)
+#define led_trigger_set(x, y) do {} while(0)
+#endif
+
+ssize_t led_trigger_store(struct class_device *dev, const char *buf,
+ size_t count);
+ssize_t led_trigger_show(struct class_device *dev, char *buf);
+
+#endif /* __LEDS_H_INCLUDED */
diff --git a/drivers/leds/ledtrig-ide-disk.c b/drivers/leds/ledtrig-ide-disk.c
new file mode 100644
index 0000000000000..fa651886ab4fc
--- /dev/null
+++ b/drivers/leds/ledtrig-ide-disk.c
@@ -0,0 +1,62 @@
+/*
+ * LED IDE-Disk Activity Trigger
+ *
+ * Copyright 2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/leds.h>
+
+static void ledtrig_ide_timerfunc(unsigned long data);
+
+DEFINE_LED_TRIGGER(ledtrig_ide);
+static DEFINE_TIMER(ledtrig_ide_timer, ledtrig_ide_timerfunc, 0, 0);
+static int ide_activity;
+static int ide_lastactivity;
+
+void ledtrig_ide_activity(void)
+{
+ ide_activity++;
+ if (!timer_pending(&ledtrig_ide_timer))
+ mod_timer(&ledtrig_ide_timer, jiffies + msecs_to_jiffies(10));
+}
+EXPORT_SYMBOL(ledtrig_ide_activity);
+
+static void ledtrig_ide_timerfunc(unsigned long data)
+{
+ if (ide_lastactivity != ide_activity) {
+ ide_lastactivity = ide_activity;
+ led_trigger_event(ledtrig_ide, LED_FULL);
+ mod_timer(&ledtrig_ide_timer, jiffies + msecs_to_jiffies(10));
+ } else {
+ led_trigger_event(ledtrig_ide, LED_OFF);
+ }
+}
+
+static int __init ledtrig_ide_init(void)
+{
+ led_trigger_register_simple("ide-disk", &ledtrig_ide);
+ return 0;
+}
+
+static void __exit ledtrig_ide_exit(void)
+{
+ led_trigger_unregister_simple(ledtrig_ide);
+}
+
+module_init(ledtrig_ide_init);
+module_exit(ledtrig_ide_exit);
+
+MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
+MODULE_DESCRIPTION("LED IDE Disk Activity Trigger");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
new file mode 100644
index 0000000000000..f484b5d6dbf86
--- /dev/null
+++ b/drivers/leds/ledtrig-timer.c
@@ -0,0 +1,170 @@
+/*
+ * LED Kernel Timer Trigger
+ *
+ * Copyright 2005-2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/timer.h>
+#include <linux/leds.h>
+#include "leds.h"
+
+struct timer_trig_data {
+ unsigned long delay_on; /* milliseconds on */
+ unsigned long delay_off; /* milliseconds off */
+ struct timer_list timer;
+};
+
+static void led_timer_function(unsigned long data)
+{
+ struct led_classdev *led_cdev = (struct led_classdev *) data;
+ struct timer_trig_data *timer_data = led_cdev->trigger_data;
+ unsigned long brightness = LED_OFF;
+ unsigned long delay = timer_data->delay_off;
+
+ if (!timer_data->delay_on || !timer_data->delay_off) {
+ led_set_brightness(led_cdev, LED_OFF);
+ return;
+ }
+
+ if (!led_cdev->brightness) {
+ brightness = LED_FULL;
+ delay = timer_data->delay_on;
+ }
+
+ led_set_brightness(led_cdev, brightness);
+
+ mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay));
+}
+
+static ssize_t led_delay_on_show(struct class_device *dev, char *buf)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct timer_trig_data *timer_data = led_cdev->trigger_data;
+
+ sprintf(buf, "%lu\n", timer_data->delay_on);
+
+ return strlen(buf) + 1;
+}
+
+static ssize_t led_delay_on_store(struct class_device *dev, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct timer_trig_data *timer_data = led_cdev->trigger_data;
+ int ret = -EINVAL;
+ char *after;
+ unsigned long state = simple_strtoul(buf, &after, 10);
+
+ if (after - buf > 0) {
+ timer_data->delay_on = state;
+ mod_timer(&timer_data->timer, jiffies + 1);
+ ret = after - buf;
+ }
+
+ return ret;
+}
+
+static ssize_t led_delay_off_show(struct class_device *dev, char *buf)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct timer_trig_data *timer_data = led_cdev->trigger_data;
+
+ sprintf(buf, "%lu\n", timer_data->delay_off);
+
+ return strlen(buf) + 1;
+}
+
+static ssize_t led_delay_off_store(struct class_device *dev, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct timer_trig_data *timer_data = led_cdev->trigger_data;
+ int ret = -EINVAL;
+ char *after;
+ unsigned long state = simple_strtoul(buf, &after, 10);
+
+ if (after - buf > 0) {
+ timer_data->delay_off = state;
+ mod_timer(&timer_data->timer, jiffies + 1);
+ ret = after - buf;
+ }
+
+ return ret;
+}
+
+static CLASS_DEVICE_ATTR(delay_on, 0644, led_delay_on_show,
+ led_delay_on_store);
+static CLASS_DEVICE_ATTR(delay_off, 0644, led_delay_off_show,
+ led_delay_off_store);
+
+static void timer_trig_activate(struct led_classdev *led_cdev)
+{
+ struct timer_trig_data *timer_data;
+
+ timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL);
+ if (!timer_data)
+ return;
+
+ led_cdev->trigger_data = timer_data;
+
+ init_timer(&timer_data->timer);
+ timer_data->timer.function = led_timer_function;
+ timer_data->timer.data = (unsigned long) led_cdev;
+
+ class_device_create_file(led_cdev->class_dev,
+ &class_device_attr_delay_on);
+ class_device_create_file(led_cdev->class_dev,
+ &class_device_attr_delay_off);
+}
+
+static void timer_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct timer_trig_data *timer_data = led_cdev->trigger_data;
+
+ if (timer_data) {
+ class_device_remove_file(led_cdev->class_dev,
+ &class_device_attr_delay_on);
+ class_device_remove_file(led_cdev->class_dev,
+ &class_device_attr_delay_off);
+ del_timer_sync(&timer_data->timer);
+ kfree(timer_data);
+ }
+}
+
+static struct led_trigger timer_led_trigger = {
+ .name = "timer",
+ .activate = timer_trig_activate,
+ .deactivate = timer_trig_deactivate,
+};
+
+static int __init timer_trig_init(void)
+{
+ return led_trigger_register(&timer_led_trigger);
+}
+
+static void __exit timer_trig_exit(void)
+{
+ led_trigger_unregister(&timer_led_trigger);
+}
+
+module_init(timer_trig_init);
+module_exit(timer_trig_exit);
+
+MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
+MODULE_DESCRIPTION("Timer LED trigger");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index aecd9e0c2616b..64fd8e79ea4c9 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -78,8 +78,7 @@ void dm_put_target_type(struct target_type *t)
if (--ti->use == 0)
module_put(ti->tt.module);
- if (ti->use < 0)
- BUG();
+ BUG_ON(ti->use < 0);
up_read(&_lock);
return;
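
The dm and md hunks from here on are a mechanical cleanup: open-coded "if (cond) BUG();" sequences become BUG_ON(cond), which is shorter and keeps the asserted condition on the same line as the trap. The two forms are equivalent; a trivial sketch:

    #include <linux/kernel.h>

    static void example_check(int use)
    {
            /* before the conversion */
            if (use < 0)
                    BUG();

            /* after: identical behaviour, condition visible at the call site */
            BUG_ON(use < 0);
    }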
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 039e071c10074..1ed5152db4506 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -215,13 +215,11 @@ static void mddev_put(mddev_t *mddev)
return;
if (!mddev->raid_disks && list_empty(&mddev->disks)) {
list_del(&mddev->all_mddevs);
- /* that blocks */
+ spin_unlock(&all_mddevs_lock);
blk_cleanup_queue(mddev->queue);
- /* that also blocks */
kobject_unregister(&mddev->kobj);
- /* result blows... */
- }
- spin_unlock(&all_mddevs_lock);
+ } else
+ spin_unlock(&all_mddevs_lock);
}
static mddev_t * mddev_find(dev_t unit)
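
The mddev_put() change above is a sleep-under-spinlock fix: blk_cleanup_queue() and kobject_unregister() can block, so all_mddevs_lock has to be dropped before they are called, on both branches. A generic sketch of the pattern, with hypothetical names standing in for the md specifics:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct example_obj {
            struct list_head node;
    };

    static DEFINE_SPINLOCK(example_lock);

    /* hypothetical stand-ins for blk_cleanup_queue()/kobject_unregister() */
    extern int example_obj_unused(struct example_obj *obj);
    extern void example_teardown_may_sleep(struct example_obj *obj);

    static void example_put(struct example_obj *obj)
    {
            spin_lock(&example_lock);
            if (example_obj_unused(obj)) {
                    list_del(&obj->node);
                    spin_unlock(&example_lock);       /* drop the spinlock...       */
                    example_teardown_may_sleep(obj);  /* ...before code that sleeps */
            } else {
                    spin_unlock(&example_lock);
            }
    }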
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3cb0872a845d6..6081941de1b34 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1135,8 +1135,19 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
mirror = i;
break;
}
- if (!uptodate)
+ if (!uptodate) {
+ int sync_blocks = 0;
+ sector_t s = r1_bio->sector;
+ long sectors_to_go = r1_bio->sectors;
+ /* make sure these bits don't get cleared. */
+ do {
+ bitmap_end_sync(mddev->bitmap, r1_bio->sector,
+ &sync_blocks, 1);
+ s += sync_blocks;
+ sectors_to_go -= sync_blocks;
+ } while (sectors_to_go > 0);
md_error(mddev, conf->mirrors[mirror].rdev);
+ }
update_head_pos(mirror, r1_bio);
@@ -1547,8 +1558,7 @@ static int init_resync(conf_t *conf)
int buffs;
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
- if (conf->r1buf_pool)
- BUG();
+ BUG_ON(conf->r1buf_pool);
conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
conf->poolinfo);
if (!conf->r1buf_pool)
@@ -1721,8 +1731,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
!conf->fullsync &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break;
- if (sync_blocks < (PAGE_SIZE>>9))
- BUG();
+ BUG_ON(sync_blocks < (PAGE_SIZE>>9));
if (len > (sync_blocks<<9))
len = sync_blocks<<9;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ab90a6d120204..617012bc107a0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1117,8 +1117,7 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
for (i=0; i<conf->copies; i++)
if (r10_bio->devs[i].bio == bio)
break;
- if (i == conf->copies)
- BUG();
+ BUG_ON(i == conf->copies);
update_head_pos(i, r10_bio);
d = r10_bio->devs[i].devnum;
@@ -1518,8 +1517,7 @@ static int init_resync(conf_t *conf)
int buffs;
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
- if (conf->r10buf_pool)
- BUG();
+ BUG_ON(conf->r10buf_pool);
conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
if (!conf->r10buf_pool)
return -ENOMEM;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dae740adaf657..31843604049cd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -73,10 +73,8 @@ static void print_raid5_conf (raid5_conf_t *conf);
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
if (atomic_dec_and_test(&sh->count)) {
- if (!list_empty(&sh->lru))
- BUG();
- if (atomic_read(&conf->active_stripes)==0)
- BUG();
+ BUG_ON(!list_empty(&sh->lru));
+ BUG_ON(atomic_read(&conf->active_stripes)==0);
if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state))
list_add_tail(&sh->lru, &conf->delayed_list);
@@ -184,10 +182,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
raid5_conf_t *conf = sh->raid_conf;
int i;
- if (atomic_read(&sh->count) != 0)
- BUG();
- if (test_bit(STRIPE_HANDLE, &sh->state))
- BUG();
+ BUG_ON(atomic_read(&sh->count) != 0);
+ BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
CHECK_DEVLOCK();
PRINTK("init_stripe called, stripe %llu\n",
@@ -269,8 +265,7 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
init_stripe(sh, sector, pd_idx, disks);
} else {
if (atomic_read(&sh->count)) {
- if (!list_empty(&sh->lru))
- BUG();
+ BUG_ON(!list_empty(&sh->lru));
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
@@ -465,8 +460,7 @@ static int drop_one_stripe(raid5_conf_t *conf)
spin_unlock_irq(&conf->device_lock);
if (!sh)
return 0;
- if (atomic_read(&sh->count))
- BUG();
+ BUG_ON(atomic_read(&sh->count));
shrink_buffers(sh, conf->pool_size);
kmem_cache_free(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes);
@@ -882,8 +876,7 @@ static void compute_parity(struct stripe_head *sh, int method)
ptr[0] = page_address(sh->dev[pd_idx].page);
switch(method) {
case READ_MODIFY_WRITE:
- if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
- BUG();
+ BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
for (i=disks ; i-- ;) {
if (i==pd_idx)
continue;
@@ -896,7 +889,7 @@ static void compute_parity(struct stripe_head *sh, int method)
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- if (sh->dev[i].written) BUG();
+ BUG_ON(sh->dev[i].written);
sh->dev[i].written = chosen;
check_xor();
}
@@ -912,7 +905,7 @@ static void compute_parity(struct stripe_head *sh, int method)
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- if (sh->dev[i].written) BUG();
+ BUG_ON(sh->dev[i].written);
sh->dev[i].written = chosen;
}
break;
@@ -995,8 +988,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
goto overlap;
- if (*bip && bi->bi_next && (*bip) != bi->bi_next)
- BUG();
+ BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
if (*bip)
bi->bi_next = *bip;
*bip = bi;
@@ -1430,8 +1422,7 @@ static void handle_stripe(struct stripe_head *sh)
set_bit(STRIPE_HANDLE, &sh->state);
if (failed == 0) {
char *pagea;
- if (uptodate != disks)
- BUG();
+ BUG_ON(uptodate != disks);
compute_parity(sh, CHECK_PARITY);
uptodate--;
pagea = page_address(sh->dev[sh->pd_idx].page);
@@ -2096,8 +2087,7 @@ static void raid5d (mddev_t *mddev)
list_del_init(first);
atomic_inc(&sh->count);
- if (atomic_read(&sh->count)!= 1)
- BUG();
+ BUG_ON(atomic_read(&sh->count)!= 1);
spin_unlock_irq(&conf->device_lock);
handled++;
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index 6df4930fddeca..bc69355e0100e 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -91,10 +91,8 @@ static void print_raid6_conf (raid6_conf_t *conf);
static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh)
{
if (atomic_dec_and_test(&sh->count)) {
- if (!list_empty(&sh->lru))
- BUG();
- if (atomic_read(&conf->active_stripes)==0)
- BUG();
+ BUG_ON(!list_empty(&sh->lru));
+ BUG_ON(atomic_read(&conf->active_stripes)==0);
if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state))
list_add_tail(&sh->lru, &conf->delayed_list);
@@ -202,10 +200,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
raid6_conf_t *conf = sh->raid_conf;
int disks = conf->raid_disks, i;
- if (atomic_read(&sh->count) != 0)
- BUG();
- if (test_bit(STRIPE_HANDLE, &sh->state))
- BUG();
+ BUG_ON(atomic_read(&sh->count) != 0);
+ BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
CHECK_DEVLOCK();
PRINTK("init_stripe called, stripe %llu\n",
@@ -284,13 +280,11 @@ static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector
init_stripe(sh, sector, pd_idx);
} else {
if (atomic_read(&sh->count)) {
- if (!list_empty(&sh->lru))
- BUG();
+ BUG_ON(!list_empty(&sh->lru));
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
- if (list_empty(&sh->lru))
- BUG();
+ BUG_ON(list_empty(&sh->lru));
list_del_init(&sh->lru);
}
}
@@ -353,8 +347,7 @@ static int drop_one_stripe(raid6_conf_t *conf)
spin_unlock_irq(&conf->device_lock);
if (!sh)
return 0;
- if (atomic_read(&sh->count))
- BUG();
+ BUG_ON(atomic_read(&sh->count));
shrink_buffers(sh, conf->raid_disks);
kmem_cache_free(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes);
@@ -780,7 +773,7 @@ static void compute_parity(struct stripe_head *sh, int method)
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- if (sh->dev[i].written) BUG();
+ BUG_ON(sh->dev[i].written);
sh->dev[i].written = chosen;
}
break;
@@ -970,8 +963,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
goto overlap;
- if (*bip && bi->bi_next && (*bip) != bi->bi_next)
- BUG();
+ BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
if (*bip)
bi->bi_next = *bip;
*bip = bi;
@@ -1906,8 +1898,7 @@ static void raid6d (mddev_t *mddev)
list_del_init(first);
atomic_inc(&sh->count);
- if (atomic_read(&sh->count)!= 1)
- BUG();
+ BUG_ON(atomic_read(&sh->count)!= 1);
spin_unlock_irq(&conf->device_lock);
handled++;
@@ -2151,6 +2142,8 @@ static int run(mddev_t *mddev)
}
/* Ok, everything is just fine now */
+ sysfs_create_group(&mddev->kobj, &raid6_attrs_group);
+
mddev->array_size = mddev->size * (mddev->raid_disks - 2);
mddev->queue->unplug_fn = raid6_unplug_device;
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index baa9f58beffc7..fffc711c260ca 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -51,18 +51,18 @@ config VIDEO_TVEEPROM
tristate
config USB_DABUSB
- tristate "DABUSB driver"
- depends on USB
- ---help---
- A Digital Audio Broadcasting (DAB) Receiver for USB and Linux
- brought to you by the DAB-Team
- <http://wwwbode.cs.tum.edu/Par/arch/dab/>. This driver can be taken
- as an example for URB-based bulk, control, and isochronous
- transactions. URB's are explained in
- <Documentation/usb/URB.txt>.
-
- To compile this driver as a module, choose M here: the
- module will be called dabusb.
+ tristate "DABUSB driver"
+ depends on USB
+ ---help---
+ A Digital Audio Broadcasting (DAB) Receiver for USB and Linux
+ brought to you by the DAB-Team
+ <http://wwwbode.cs.tum.edu/Par/arch/dab/>. This driver can be taken
+ as an example for URB-based bulk, control, and isochronous
+ transactions. URB's are explained in
+ <Documentation/usb/URB.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dabusb.
endmenu
diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig
index 2337b41714e06..376ca48f1d1da 100644
--- a/drivers/media/dvb/bt8xx/Kconfig
+++ b/drivers/media/dvb/bt8xx/Kconfig
@@ -7,6 +7,7 @@ config DVB_BT8XX
select DVB_CX24110
select DVB_OR51211
select DVB_LGDT330X
+ select FW_LOADER
help
Support for PCI cards based on the Bt8xx PCI bridge. Examples are
the Nebula cards, the Pinnacle PCTV cards, the Twinhan DST cards,
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 09e96e9ddbdfc..04578df3f2491 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -141,12 +141,18 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
}
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
- void *mem = vmalloc(DVR_BUFFER_SIZE);
+ void *mem;
+ if (!dvbdev->readers) {
+ mutex_unlock(&dmxdev->mutex);
+ return -EBUSY;
+ }
+ mem = vmalloc(DVR_BUFFER_SIZE);
if (!mem) {
mutex_unlock(&dmxdev->mutex);
return -ENOMEM;
}
dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
+ dvbdev->readers--;
}
if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
@@ -184,6 +190,7 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
dmxdev->dvr_orig_fe);
}
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
+ dvbdev->readers++;
if (dmxdev->dvr_buffer.data) {
void *mem = dmxdev->dvr_buffer.data;
mb();
@@ -1029,8 +1036,7 @@ static struct file_operations dvb_dvr_fops = {
static struct dvb_device dvbdev_dvr = {
.priv = NULL,
- .users = 1,
- .writers = 1,
+ .readers = 1,
.fops = &dvb_dvr_fops
};
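
The dmxdev change adds simple reader accounting so the DVR device accepts only one concurrent reader: readers starts at 1, open() for reading fails with -EBUSY once it has reached 0, and release() returns the slot. A stripped-down sketch of that accounting, with hypothetical names:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_mutex);
    static int example_readers = 1;         /* one concurrent reader allowed */

    static int example_open_for_read(void)
    {
            mutex_lock(&example_mutex);
            if (!example_readers) {
                    mutex_unlock(&example_mutex);
                    return -EBUSY;          /* someone already holds the reader slot */
            }
            example_readers--;
            mutex_unlock(&example_mutex);
            return 0;
    }

    static void example_release_reader(void)
    {
            mutex_lock(&example_mutex);
            example_readers++;
            mutex_unlock(&example_mutex);
    }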
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 2c3ea8f95dcd4..4f8f257e67951 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -105,6 +105,7 @@ struct dvb_frontend_private {
fe_status_t status;
unsigned long tune_mode_flags;
unsigned int delay;
+ unsigned int reinitialise;
/* swzigzag values */
unsigned int state;
@@ -121,6 +122,7 @@ struct dvb_frontend_private {
unsigned int check_wrapped;
};
+static void dvb_frontend_wakeup(struct dvb_frontend *fe);
static void dvb_frontend_add_event(struct dvb_frontend *fe, fe_status_t status)
{
@@ -213,6 +215,15 @@ static void dvb_frontend_init(struct dvb_frontend *fe)
fe->ops->init(fe);
}
+void dvb_frontend_reinitialise(struct dvb_frontend *fe)
+{
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
+
+ fepriv->reinitialise = 1;
+ dvb_frontend_wakeup(fe);
+}
+EXPORT_SYMBOL(dvb_frontend_reinitialise);
+
static void dvb_frontend_swzigzag_update_delay(struct dvb_frontend_private *fepriv, int locked)
{
int q2;
@@ -505,8 +516,8 @@ static int dvb_frontend_thread(void *data)
fepriv->quality = 0;
fepriv->delay = 3*HZ;
fepriv->status = 0;
- dvb_frontend_init(fe);
fepriv->wakeup = 0;
+ fepriv->reinitialise = 1;
while (1) {
up(&fepriv->sem); /* is locked when we enter the thread... */
@@ -524,6 +535,11 @@ static int dvb_frontend_thread(void *data)
if (down_interruptible(&fepriv->sem))
break;
+ if (fepriv->reinitialise) {
+ dvb_frontend_init(fe);
+ fepriv->reinitialise = 0;
+ }
+
/* do an iteration of the tuning loop */
if (fe->ops->tune) {
/* have we been asked to retune? */
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index d5aee5ad67a0f..5926a3b745c9d 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -112,6 +112,8 @@ extern int dvb_register_frontend(struct dvb_adapter* dvb,
extern int dvb_unregister_frontend(struct dvb_frontend* fe);
+extern void dvb_frontend_reinitialise(struct dvb_frontend *fe);
+
extern void dvb_frontend_sleep_until(struct timeval *waketime, u32 add_usec);
extern s32 timeval_usec_diff(struct timeval lasttime, struct timeval curtime);
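
dvb_frontend_reinitialise() only raises a flag and wakes the frontend thread, which then reruns fe->ops->init() from its own context on the next loop iteration; it is intended for card drivers whose hardware loses frontend state (for example after the tuner block has been powered down). A hedged usage sketch with a hypothetical driver callback:

    #include "dvb_frontend.h"

    /* hypothetical callback invoked after the card's power has been restored */
    static void example_power_restored(struct dvb_frontend *fe)
    {
            /*
             * Do not call fe->ops->init() directly here; let the frontend
             * thread reinitialise from its own context instead.
             */
            dvb_frontend_reinitialise(fe);
    }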
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index a14e737ec848d..7edd6362b9cc6 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -233,6 +233,45 @@ static struct dvb_usb_rc_key dvico_mce_rc_keys[] = {
{ 0xfe, 0x4e, KEY_POWER },
};
+static struct dvb_usb_rc_key dvico_portable_rc_keys[] = {
+ { 0xfc, 0x02, KEY_SETUP }, /* Profile */
+ { 0xfc, 0x43, KEY_POWER2 },
+ { 0xfc, 0x06, KEY_EPG },
+ { 0xfc, 0x5a, KEY_BACK },
+ { 0xfc, 0x05, KEY_MENU },
+ { 0xfc, 0x47, KEY_INFO },
+ { 0xfc, 0x01, KEY_TAB },
+ { 0xfc, 0x42, KEY_PREVIOUSSONG },/* Replay */
+ { 0xfc, 0x49, KEY_VOLUMEUP },
+ { 0xfc, 0x09, KEY_VOLUMEDOWN },
+ { 0xfc, 0x54, KEY_CHANNELUP },
+ { 0xfc, 0x0b, KEY_CHANNELDOWN },
+ { 0xfc, 0x16, KEY_CAMERA },
+ { 0xfc, 0x40, KEY_TUNER }, /* ATV/DTV */
+ { 0xfc, 0x45, KEY_OPEN },
+ { 0xfc, 0x19, KEY_1 },
+ { 0xfc, 0x18, KEY_2 },
+ { 0xfc, 0x1b, KEY_3 },
+ { 0xfc, 0x1a, KEY_4 },
+ { 0xfc, 0x58, KEY_5 },
+ { 0xfc, 0x59, KEY_6 },
+ { 0xfc, 0x15, KEY_7 },
+ { 0xfc, 0x14, KEY_8 },
+ { 0xfc, 0x17, KEY_9 },
+ { 0xfc, 0x44, KEY_ANGLE }, /* Aspect */
+ { 0xfc, 0x55, KEY_0 },
+ { 0xfc, 0x07, KEY_ZOOM },
+ { 0xfc, 0x0a, KEY_REWIND },
+ { 0xfc, 0x08, KEY_PLAYPAUSE },
+ { 0xfc, 0x4b, KEY_FASTFORWARD },
+ { 0xfc, 0x5b, KEY_MUTE },
+ { 0xfc, 0x04, KEY_STOP },
+ { 0xfc, 0x56, KEY_RECORD },
+ { 0xfc, 0x57, KEY_POWER },
+ { 0xfc, 0x41, KEY_UNKNOWN }, /* INPUT */
+ { 0xfc, 0x00, KEY_UNKNOWN }, /* HD */
+};
+
static int cxusb_dee1601_demod_init(struct dvb_frontend* fe)
{
static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x28 };
@@ -511,6 +550,11 @@ static struct dvb_usb_properties cxusb_bluebird_lgh064f_properties = {
.i2c_algo = &cxusb_i2c_algo,
+ .rc_interval = 100,
+ .rc_key_map = dvico_portable_rc_keys,
+ .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_query = cxusb_rc_query,
+
.generic_bulk_ctrl_endpoint = 0x01,
/* parameter for the MPEG2-data transfer */
.urb = {
@@ -600,6 +644,11 @@ static struct dvb_usb_properties cxusb_bluebird_lgz201_properties = {
.i2c_algo = &cxusb_i2c_algo,
+ .rc_interval = 100,
+ .rc_key_map = dvico_portable_rc_keys,
+ .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_query = cxusb_rc_query,
+
.generic_bulk_ctrl_endpoint = 0x01,
/* parameter for the MPEG2-data transfer */
.urb = {
@@ -640,6 +689,11 @@ static struct dvb_usb_properties cxusb_bluebird_dtt7579_properties = {
.i2c_algo = &cxusb_i2c_algo,
+ .rc_interval = 100,
+ .rc_key_map = dvico_portable_rc_keys,
+ .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_query = cxusb_rc_query,
+
.generic_bulk_ctrl_endpoint = 0x01,
/* parameter for the MPEG2-data transfer */
.urb = {
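Editor's note: the cxusb hunks wire one shared key table into three board property structs; the table is just (custom code, data code, key) triples that the generic dvb-usb remote poller matches against. A small stand-alone lookup over such a table might look like this (the struct layout mirrors dvb_usb_rc_key, the rest is illustrative):

#include <stdio.h>

struct rc_key {
	unsigned char custom;   /* remote/vendor code */
	unsigned char data;     /* key scan code */
	unsigned int event;     /* input-layer key code */
};

static const struct rc_key demo_keys[] = {
	{ 0xfc, 0x49, 115 /* KEY_VOLUMEUP */ },
	{ 0xfc, 0x09, 114 /* KEY_VOLUMEDOWN */ },
	{ 0xfc, 0x5b, 113 /* KEY_MUTE */ },
};

/* Return the input event for a received (custom, data) pair, or 0 if unknown. */
static unsigned int demo_lookup(unsigned char custom, unsigned char data)
{
	size_t i;

	for (i = 0; i < sizeof(demo_keys) / sizeof(demo_keys[0]); i++)
		if (demo_keys[i].custom == custom && demo_keys[i].data == data)
			return demo_keys[i].event;
	return 0;
}

int main(void)
{
	printf("0xfc/0x49 -> key code %u\n", demo_lookup(0xfc, 0x49));
	return 0;
}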
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.c b/drivers/media/dvb/dvb-usb/dtt200u.c
index 12ebaf8bddca1..70afcfd141ca0 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u.c
@@ -94,12 +94,14 @@ static int dtt200u_frontend_attach(struct dvb_usb_device *d)
static struct dvb_usb_properties dtt200u_properties;
static struct dvb_usb_properties wt220u_properties;
+static struct dvb_usb_properties wt220u_zl0353_properties;
static int dtt200u_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
if (dvb_usb_device_init(intf,&dtt200u_properties,THIS_MODULE,NULL) == 0 ||
- dvb_usb_device_init(intf,&wt220u_properties,THIS_MODULE,NULL) == 0)
+ dvb_usb_device_init(intf,&wt220u_properties,THIS_MODULE,NULL) == 0 ||
+ dvb_usb_device_init(intf,&wt220u_zl0353_properties,THIS_MODULE,NULL) == 0)
return 0;
return -ENODEV;
@@ -110,6 +112,8 @@ static struct usb_device_id dtt200u_usb_table [] = {
{ USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_DTT200U_WARM) },
{ USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_WT220U_COLD) },
{ USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_WT220U_WARM) },
+ { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_WT220U_ZL0353_COLD) },
+ { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_WT220U_ZL0353_WARM) },
{ 0 },
};
MODULE_DEVICE_TABLE(usb, dtt200u_usb_table);
@@ -196,6 +200,47 @@ static struct dvb_usb_properties wt220u_properties = {
}
};
+static struct dvb_usb_properties wt220u_zl0353_properties = {
+ .caps = DVB_USB_HAS_PID_FILTER | DVB_USB_NEED_PID_FILTERING,
+ .pid_filter_count = 15,
+
+ .usb_ctrl = CYPRESS_FX2,
+ .firmware = "dvb-usb-wt220u-zl0353-01.fw",
+
+ .power_ctrl = dtt200u_power_ctrl,
+ .streaming_ctrl = dtt200u_streaming_ctrl,
+ .pid_filter = dtt200u_pid_filter,
+ .frontend_attach = dtt200u_frontend_attach,
+
+ .rc_interval = 300,
+ .rc_key_map = dtt200u_rc_keys,
+ .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_query = dtt200u_rc_query,
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+
+ /* parameter for the MPEG2-data transfer */
+ .urb = {
+ .type = DVB_USB_BULK,
+ .count = 7,
+ .endpoint = 0x02,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+ }
+ }
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { .name = "WideView WT-220U PenType Receiver (based on ZL353)",
+ .cold_ids = { &dtt200u_usb_table[4], NULL },
+ .warm_ids = { &dtt200u_usb_table[5], NULL },
+ },
+ { NULL },
+ }
+};
+
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver dtt200u_usb_driver = {
.name = "dvb_usb_dtt200u",
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 4a1b9e77e3395..cb239049b0983 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -83,6 +83,8 @@
#define USB_PID_DTT200U_WARM 0x0301
#define USB_PID_WT220U_COLD 0x0222
#define USB_PID_WT220U_WARM 0x0221
+#define USB_PID_WT220U_ZL0353_COLD 0x022a
+#define USB_PID_WT220U_ZL0353_WARM 0x022b
#define USB_PID_WINTV_NOVA_T_USB2_COLD 0x9300
#define USB_PID_WINTV_NOVA_T_USB2_WARM 0x9301
#define USB_PID_NEBULA_DIGITV 0x0201
diff --git a/drivers/media/dvb/dvb-usb/vp702x-fe.c b/drivers/media/dvb/dvb-usb/vp702x-fe.c
index b6d95e1c9c527..2a89f8c5da99c 100644
--- a/drivers/media/dvb/dvb-usb/vp702x-fe.c
+++ b/drivers/media/dvb/dvb-usb/vp702x-fe.c
@@ -147,8 +147,9 @@ static int vp702x_fe_set_frontend(struct dvb_frontend* fe,
cmd[4] = (sr >> 4) & 0xff;
cmd[5] = (sr << 4) & 0xf0;
- deb_fe("setting frontend to: %u -> %u (%x) LNB-based GHz, symbolrate: %d -> %Lu (%Lx)\n",
- fep->frequency,freq,freq, fep->u.qpsk.symbol_rate, sr, sr);
+ deb_fe("setting frontend to: %u -> %u (%x) LNB-based GHz, symbolrate: %d -> %lu (%lx)\n",
+ fep->frequency,freq,freq, fep->u.qpsk.symbol_rate,
+ (unsigned long) sr, (unsigned long) sr);
/* if (fep->inversion == INVERSION_ON)
cmd[6] |= 0x80; */
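Editor's note: the vp702x change fixes a printk format mismatch by casting the symbol rate to unsigned long and printing it with %lu/%lx. The same portability rule applies to any printf-style call; a runnable user-space illustration (safe here because the value fits in unsigned long, as in the driver):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t sr = 27500000;

	/* Either cast to a type that matches the conversion specifier... */
	printf("symbolrate: %lu (%#lx)\n", (unsigned long)sr, (unsigned long)sr);
	/* ...or use the width-correct macros for 64-bit values. */
	printf("symbolrate: %" PRIu64 " (%#" PRIx64 ")\n", sr, sr);
	return 0;
}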
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 94233168d2415..37d5e0af1683a 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -181,6 +181,11 @@ config DVB_OR51211
help
An ATSC 8VSB tuner module. Say Y when you want to support this frontend.
+ This driver needs external firmware. Please use the command
+ "<kerneldir>/Documentation/dvb/get_dvb_firmware or51211" to
+ download it, and then copy it to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on the configuration of firmware hotplug).
+
config DVB_OR51132
tristate "Oren OR51132 based"
depends on DVB_CORE
@@ -189,6 +194,13 @@ config DVB_OR51132
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
+ This driver needs external firmware. Please use the commands
+ "<kerneldir>/Documentation/dvb/get_dvb_firmware or51132_vsb" and/or
+ "<kerneldir>/Documentation/dvb/get_dvb_firmware or51132_qam" to
+ download the firmware files for 8VSB and QAM64/256, respectively. Copy
+ them to /usr/lib/hotplug/firmware or /lib/firmware (depending on the
+ configuration of firmware hotplug).
+
config DVB_BCM3510
tristate "Broadcom BCM3510"
depends on DVB_CORE
diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c
index 8e8df7b4ca0e5..b83dafa4e12cd 100644
--- a/drivers/media/dvb/frontends/tda1004x.c
+++ b/drivers/media/dvb/frontends/tda1004x.c
@@ -52,7 +52,6 @@ struct tda1004x_state {
struct dvb_frontend frontend;
/* private demod data */
- u8 initialised;
enum tda1004x_demod demod_type;
};
@@ -594,9 +593,6 @@ static int tda10045_init(struct dvb_frontend* fe)
dprintk("%s\n", __FUNCTION__);
- if (state->initialised)
- return 0;
-
if (tda10045_fwupload(fe)) {
printk("tda1004x: firmware upload failed\n");
return -EIO;
@@ -626,7 +622,6 @@ static int tda10045_init(struct dvb_frontend* fe)
tda1004x_write_mask(state, 0x1f, 0x01, state->config->invert_oclk);
- state->initialised = 1;
return 0;
}
@@ -635,9 +630,6 @@ static int tda10046_init(struct dvb_frontend* fe)
struct tda1004x_state* state = fe->demodulator_priv;
dprintk("%s\n", __FUNCTION__);
- if (state->initialised)
- return 0;
-
if (tda10046_fwupload(fe)) {
printk("tda1004x: firmware upload failed\n");
return -EIO;
@@ -697,7 +689,6 @@ static int tda10046_init(struct dvb_frontend* fe)
// tda1004x_write_mask(state, 0x50, 0x80, 0x80); // handle out of guard echoes
tda1004x_write_mask(state, 0x3a, 0x80, state->config->invert_oclk << 7);
- state->initialised = 1;
return 0;
}
@@ -1207,7 +1198,6 @@ static int tda1004x_sleep(struct dvb_frontend* fe)
tda1004x_write_mask(state, TDA1004X_CONFC4, 1, 1);
break;
}
- state->initialised = 0;
return 0;
}
@@ -1271,7 +1261,6 @@ struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config,
state->config = config;
state->i2c = i2c;
memcpy(&state->ops, &tda10045_ops, sizeof(struct dvb_frontend_ops));
- state->initialised = 0;
state->demod_type = TDA1004X_DEMOD_TDA10045;
/* check if the demod is there */
@@ -1330,7 +1319,6 @@ struct dvb_frontend* tda10046_attach(const struct tda1004x_config* config,
state->config = config;
state->i2c = i2c;
memcpy(&state->ops, &tda10046_ops, sizeof(struct dvb_frontend_ops));
- state->initialised = 0;
state->demod_type = TDA1004X_DEMOD_TDA10046;
/* check if the demod is there */
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 840efec32cb62..d028245c8eede 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -87,6 +87,7 @@ static int volume = 255;
static int budgetpatch;
static int wss_cfg_4_3 = 0x4008;
static int wss_cfg_16_9 = 0x0007;
+static int tv_standard;
module_param_named(debug, av7110_debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (bitmask, default 0)");
@@ -109,6 +110,8 @@ module_param(wss_cfg_4_3, int, 0444);
MODULE_PARM_DESC(wss_cfg_4_3, "WSS 4:3 - default 0x4008 - bit 15: disable, 14: burst mode, 13..0: wss data");
module_param(wss_cfg_16_9, int, 0444);
MODULE_PARM_DESC(wss_cfg_16_9, "WSS 16:9 - default 0x0007 - bit 15: disable, 14: burst mode, 13..0: wss data");
+module_param(tv_standard, int, 0444);
+MODULE_PARM_DESC(tv_standard, "TV standard: 0 PAL (default), 1 NTSC");
static void restart_feeds(struct av7110 *av7110);
@@ -2123,7 +2126,7 @@ static int frontend_init(struct av7110 *av7110)
read_pwm(av7110));
break;
case 0x0003:
- /* Haupauge DVB-C 2.1 VES1820/ALPS TDBE2 */
+ /* Hauppauge DVB-C 2.1 VES1820/ALPS TDBE2 */
av7110->fe = ves1820_attach(&alps_tdbe2_config, &av7110->i2c_adap,
read_pwm(av7110));
break;
@@ -2543,6 +2546,9 @@ static int __devinit av7110_attach(struct saa7146_dev* dev,
av7110->osdwin = 1;
mutex_init(&av7110->osd_mutex);
+ /* TV standard */
+ av7110->vidmode = tv_standard == 1 ? VIDEO_MODE_NTSC : VIDEO_MODE_PAL;
+
/* ARM "watchdog" */
init_waitqueue_head(&av7110->arm_wait);
av7110->arm_thread = NULL;
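Editor's note: the av7110 hunks add a read-only module parameter and fold it into the per-device video mode once, at attach time (replacing the hard-coded PAL default removed from av7110_av.c below). A stripped-down sketch of that module-parameter pattern (the symbols are made up, only the API calls are real):

#include <linux/module.h>

static int tv_standard;                 /* 0 = PAL (default), 1 = NTSC */
module_param(tv_standard, int, 0444);   /* visible in sysfs, not writable */
MODULE_PARM_DESC(tv_standard, "TV standard: 0 PAL (default), 1 NTSC");

/* At device-attach time the parameter is folded into per-device state. */
static int demo_pick_mode(void)
{
	return tv_standard == 1 ? 1 /* NTSC */ : 0 /* PAL */;
}

MODULE_LICENSE("GPL");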
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 400facec74071..2eff09f638d3b 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -1479,8 +1479,6 @@ int av7110_av_init(struct av7110 *av7110)
void (*play[])(u8 *, int, void *) = { play_audio_cb, play_video_cb };
int i, ret;
- av7110->vidmode = VIDEO_MODE_PAL;
-
for (i = 0; i < 2; i++) {
struct ipack *ipack = av7110->ipack + i;
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 9dd4745f53123..8efe3ce5f66c8 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -60,11 +60,11 @@ struct budget_av {
struct dvb_ca_en50221 ca;
};
-/* GPIO CI Connections:
- * 0 - Vcc/Reset (Reset is controlled by capacitor)
- * 1 - Attribute Memory
- * 2 - Card Enable (Active Low)
- * 3 - Card Detect
+/* GPIO Connections:
+ * 0 - Vcc/Reset (Reset is controlled by capacitor). Resets the frontend *AS WELL*!
+ * 1 - CI memory select 0=>IO memory, 1=>Attribute Memory
+ * 2 - CI Card Enable (Active Low)
+ * 3 - CI Card Detect
*/
/****************************************************************************
@@ -214,6 +214,9 @@ static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
while (--timeout > 0 && ciintf_read_attribute_mem(ca, slot, 0) != 0x1d)
msleep(100);
+ /* reinitialise the frontend */
+ dvb_frontend_reinitialise(budget_av->budget.dvb_frontend);
+
if (timeout <= 0)
{
printk(KERN_ERR "budget-av: cam reset failed (timeout).\n");
diff --git a/drivers/media/dvb/ttpci/budget-core.c b/drivers/media/dvb/ttpci/budget-core.c
index 633e68c341c87..ea2066d461fc9 100644
--- a/drivers/media/dvb/ttpci/budget-core.c
+++ b/drivers/media/dvb/ttpci/budget-core.c
@@ -39,9 +39,21 @@
#include "budget.h"
#include "ttpci-eeprom.h"
+#define TS_WIDTH (2 * TS_SIZE)
+#define TS_WIDTH_ACTIVY TS_SIZE
+#define TS_HEIGHT_MASK 0xf00
+#define TS_HEIGHT_MASK_ACTIVY 0xc00
+#define TS_MIN_BUFSIZE_K 188
+#define TS_MAX_BUFSIZE_K 1410
+#define TS_MAX_BUFSIZE_K_ACTIVY 564
+#define BUFFER_WARNING_WAIT (30*HZ)
+
int budget_debug;
+static int dma_buffer_size = TS_MIN_BUFSIZE_K;
module_param_named(debug, budget_debug, int, 0644);
+module_param_named(bufsize, dma_buffer_size, int, 0444);
MODULE_PARM_DESC(debug, "Turn on/off budget debugging (default:off).");
+MODULE_PARM_DESC(bufsize, "DMA buffer size in KB, default: 188, min: 188, max: 1410 (Activy: 564)");
/****************************************************************************
* TT budget / WinTV Nova
@@ -70,11 +82,10 @@ static int start_ts_capture(struct budget *budget)
saa7146_write(dev, MC1, MASK_20); // DMA3 off
- memset(budget->grabbing, 0x00, TS_HEIGHT * TS_WIDTH);
+ memset(budget->grabbing, 0x00, budget->buffer_size);
saa7146_write(dev, PCI_BT_V1, 0x001c0000 | (saa7146_read(dev, PCI_BT_V1) & ~0x001f0000));
- budget->tsf = 0xff;
budget->ttbp = 0;
/*
@@ -115,16 +126,12 @@ static int start_ts_capture(struct budget *budget)
saa7146_write(dev, BASE_ODD3, 0);
saa7146_write(dev, BASE_EVEN3, 0);
- saa7146_write(dev, PROT_ADDR3, TS_WIDTH * TS_HEIGHT);
+ saa7146_write(dev, PROT_ADDR3, budget->buffer_size);
saa7146_write(dev, BASE_PAGE3, budget->pt.dma | ME1 | 0x90);
- if (budget->card->type == BUDGET_FS_ACTIVY) {
- saa7146_write(dev, PITCH3, TS_WIDTH / 2);
- saa7146_write(dev, NUM_LINE_BYTE3, ((TS_HEIGHT * 2) << 16) | (TS_WIDTH / 2));
- } else {
- saa7146_write(dev, PITCH3, TS_WIDTH);
- saa7146_write(dev, NUM_LINE_BYTE3, (TS_HEIGHT << 16) | TS_WIDTH);
- }
+ saa7146_write(dev, PITCH3, budget->buffer_width);
+ saa7146_write(dev, NUM_LINE_BYTE3,
+ (budget->buffer_height << 16) | budget->buffer_width);
saa7146_write(dev, MC2, (MASK_04 | MASK_20));
@@ -141,11 +148,12 @@ static void vpeirq(unsigned long data)
u8 *mem = (u8 *) (budget->grabbing);
u32 olddma = budget->ttbp;
u32 newdma = saa7146_read(budget->dev, PCI_VDP3);
+ u32 count;
/* nearest lower position divisible by 188 */
newdma -= newdma % 188;
- if (newdma >= TS_BUFLEN)
+ if (newdma >= budget->buffer_size)
return;
budget->ttbp = newdma;
@@ -154,11 +162,24 @@ static void vpeirq(unsigned long data)
return;
if (newdma > olddma) { /* no wraparound, dump olddma..newdma */
- dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, (newdma - olddma) / 188);
+ count = newdma - olddma;
+ dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, count / 188);
} else { /* wraparound, dump olddma..buflen and 0..newdma */
- dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, (TS_BUFLEN - olddma) / 188);
+ count = budget->buffer_size - olddma;
+ dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, count / 188);
+ count += newdma;
dvb_dmx_swfilter_packets(&budget->demux, mem, newdma / 188);
}
+
+ if (count > budget->buffer_warning_threshold)
+ budget->buffer_warnings++;
+
+ if (budget->buffer_warnings && time_after(jiffies, budget->buffer_warning_time)) {
+ printk("%s %s: used %d times >80%% of buffer (%u bytes now)\n",
+ budget->dev->name, __FUNCTION__, budget->buffer_warnings, count);
+ budget->buffer_warning_time = jiffies + BUFFER_WARNING_WAIT;
+ budget->buffer_warnings = 0;
+ }
}
@@ -341,9 +362,10 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
struct saa7146_pci_extension_data *info,
struct module *owner)
{
- int length = TS_WIDTH * TS_HEIGHT;
int ret = 0;
struct budget_info *bi = info->ext_priv;
+ int max_bufsize;
+ int height_mask;
memset(budget, 0, sizeof(struct budget));
@@ -352,6 +374,32 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
budget->card = bi;
budget->dev = (struct saa7146_dev *) dev;
+ if (budget->card->type == BUDGET_FS_ACTIVY) {
+ budget->buffer_width = TS_WIDTH_ACTIVY;
+ max_bufsize = TS_MAX_BUFSIZE_K_ACTIVY;
+ height_mask = TS_HEIGHT_MASK_ACTIVY;
+ } else {
+ budget->buffer_width = TS_WIDTH;
+ max_bufsize = TS_MAX_BUFSIZE_K;
+ height_mask = TS_HEIGHT_MASK;
+ }
+
+ if (dma_buffer_size < TS_MIN_BUFSIZE_K)
+ dma_buffer_size = TS_MIN_BUFSIZE_K;
+ else if (dma_buffer_size > max_bufsize)
+ dma_buffer_size = max_bufsize;
+
+ budget->buffer_height = dma_buffer_size * 1024 / budget->buffer_width;
+ budget->buffer_height &= height_mask;
+ budget->buffer_size = budget->buffer_height * budget->buffer_width;
+ budget->buffer_warning_threshold = budget->buffer_size * 80/100;
+ budget->buffer_warnings = 0;
+ budget->buffer_warning_time = jiffies;
+
+ dprintk(2, "%s: width = %d, height = %d\n",
+ budget->dev->name, budget->buffer_width, budget->buffer_height);
+ printk("%s: dma buffer size %u\n", budget->dev->name, budget->buffer_size);
+
dvb_register_adapter(&budget->dvb_adapter, budget->card->name, owner);
/* set dd1 stream a & b */
@@ -392,7 +440,7 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
ttpci_eeprom_parse_mac(&budget->i2c_adap, budget->dvb_adapter.proposed_mac);
if (NULL ==
- (budget->grabbing = saa7146_vmalloc_build_pgtable(dev->pci, length, &budget->pt))) {
+ (budget->grabbing = saa7146_vmalloc_build_pgtable(dev->pci, budget->buffer_size, &budget->pt))) {
ret = -ENOMEM;
goto err;
}
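Editor's note: the budget-core rework sizes the DMA buffer from the new bufsize module parameter: the requested KB value is clamped to the card's range, converted into whole "lines" of the fixed line width, and the line count is masked to what the SAA7146 accepts; the usable buffer is then lines × width. The arithmetic is easy to check in isolation (constants copied from the hunk above, the helper itself is illustrative):

#include <stdio.h>

#define TS_SIZE           188
#define TS_WIDTH          (2 * TS_SIZE)   /* bytes per line, non-Activy cards */
#define TS_HEIGHT_MASK    0xf00           /* hardware line-count alignment */
#define TS_MIN_BUFSIZE_K  188
#define TS_MAX_BUFSIZE_K  1410

static unsigned int demo_buffer_size(int requested_kb)
{
	unsigned int height;

	if (requested_kb < TS_MIN_BUFSIZE_K)
		requested_kb = TS_MIN_BUFSIZE_K;
	else if (requested_kb > TS_MAX_BUFSIZE_K)
		requested_kb = TS_MAX_BUFSIZE_K;

	height = requested_kb * 1024 / TS_WIDTH;  /* whole lines that fit */
	height &= TS_HEIGHT_MASK;                 /* round down to chip alignment */
	return height * TS_WIDTH;                 /* usable DMA buffer in bytes */
}

int main(void)
{
	printf("188 KB  -> %u bytes\n", demo_buffer_size(188));
	printf("1410 KB -> %u bytes\n", demo_buffer_size(1410));
	return 0;
}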
diff --git a/drivers/media/dvb/ttpci/budget-patch.c b/drivers/media/dvb/ttpci/budget-patch.c
index 9fc9185a84269..1b3aaac5e7636 100644
--- a/drivers/media/dvb/ttpci/budget-patch.c
+++ b/drivers/media/dvb/ttpci/budget-patch.c
@@ -577,6 +577,17 @@ static int budget_patch_attach (struct saa7146_dev* dev, struct saa7146_pci_exte
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
// Set RPS1 Address register to point to RPS code (r108 p42)
saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
+
+ if (!(budget = kmalloc (sizeof(struct budget_patch), GFP_KERNEL)))
+ return -ENOMEM;
+
+ dprintk(2, "budget: %p\n", budget);
+
+ if ((err = ttpci_budget_init (budget, dev, info, THIS_MODULE))) {
+ kfree (budget);
+ return err;
+ }
+
// Set Source Line Counter Threshold, using BRS (rCC p43)
// It generates HS event every TS_HEIGHT lines
// this is related to TS_WIDTH set in register
@@ -585,24 +596,13 @@ static int budget_patch_attach (struct saa7146_dev* dev, struct saa7146_pci_exte
//,then RPS_THRESH1
// should be set to trigger every TS_HEIGHT (512) lines.
//
- saa7146_write(dev, RPS_THRESH1, (TS_HEIGHT*1) | MASK_12 );
+ saa7146_write(dev, RPS_THRESH1, budget->buffer_height | MASK_12 );
// saa7146_write(dev, RPS_THRESH0, ((TS_HEIGHT/2)<<16) |MASK_28| (TS_HEIGHT/2) |MASK_12 );
// Enable RPS1 (rFC p33)
saa7146_write(dev, MC1, (MASK_13 | MASK_29));
- if (!(budget = kmalloc (sizeof(struct budget_patch), GFP_KERNEL)))
- return -ENOMEM;
-
- dprintk(2, "budget: %p\n", budget);
-
- if ((err = ttpci_budget_init (budget, dev, info, THIS_MODULE))) {
- kfree (budget);
- return err;
- }
-
-
dev->ext_priv = budget;
budget->dvb_adapter.priv = budget;
diff --git a/drivers/media/dvb/ttpci/budget.h b/drivers/media/dvb/ttpci/budget.h
index 4ac0f4d080252..ecea3a13030e6 100644
--- a/drivers/media/dvb/ttpci/budget.h
+++ b/drivers/media/dvb/ttpci/budget.h
@@ -58,7 +58,13 @@ struct budget {
int ci_present;
int video_port;
- u8 tsf;
+ u32 buffer_width;
+ u32 buffer_height;
+ u32 buffer_size;
+ u32 buffer_warning_threshold;
+ u32 buffer_warnings;
+ unsigned long buffer_warning_time;
+
u32 ttbp;
int feeding;
@@ -79,11 +85,6 @@ static struct saa7146_pci_extension_data x_var = { \
.ext_priv = &x_var ## _info, \
.ext = &budget_extension };
-#define TS_WIDTH (376)
-#define TS_HEIGHT (512)
-#define TS_BUFLEN (TS_WIDTH*TS_HEIGHT)
-#define TS_MAX_PACKETS (TS_BUFLEN/TS_SIZE)
-
#define BUDGET_TT 0
#define BUDGET_TT_HW_DISEQC 1
#define BUDGET_PATCH 3
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index f31a19890b15d..85888a8a93c9b 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -300,7 +300,7 @@ config VIDEO_OVCAMCHIP
camera drivers.
To compile this driver as a module, choose M here: the
- module will be called ovcamchip
+ module will be called ovcamchip.
config VIDEO_M32R_AR
tristate "AR devices"
@@ -316,6 +316,13 @@ config VIDEO_M32R_AR_M64278
Say Y here to use the Renesas M64278E-800 camera module,
which supports VGA (640x480 pixels) images.
+#
+# Encoder / Decoder module configuration
+#
+
+menu "Encoders and Decoders"
+ depends on VIDEO_DEV
+
config VIDEO_MSP3400
tristate "Micronas MSP34xx audio decoders"
depends on VIDEO_DEV && I2C
@@ -323,7 +330,7 @@ config VIDEO_MSP3400
Support for the Micronas MSP34xx series of audio decoders.
To compile this driver as a module, choose M here: the
- module will be called msp3400
+ module will be called msp3400.
config VIDEO_CS53L32A
tristate "Cirrus Logic CS53L32A audio ADC"
@@ -333,17 +340,27 @@ config VIDEO_CS53L32A
stereo A/D converter.
To compile this driver as a module, choose M here: the
- module will be called cs53l32a
+ module will be called cs53l32a.
config VIDEO_WM8775
- tristate "Wolfson Microelectronics WM8775 audio ADC"
+ tristate "Wolfson Microelectronics WM8775 audio ADC with input mixer"
depends on VIDEO_DEV && I2C && EXPERIMENTAL
---help---
- Support for the Wolfson Microelectronics WM8775
- high performance stereo A/D Converter.
+ Support for the Wolfson Microelectronics WM8775 high
+ performance stereo A/D Converter with a 4 channel input mixer.
To compile this driver as a module, choose M here: the
- module will be called wm8775
+ module will be called wm8775.
+
+config VIDEO_WM8739
+ tristate "Wolfson Microelectronics WM8739 stereo audio ADC"
+ depends on VIDEO_DEV && I2C && EXPERIMENTAL
+ ---help---
+ Support for the Wolfson Microelectronics WM8739
+ stereo A/D Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wm8739.
source "drivers/media/video/cx25840/Kconfig"
@@ -354,7 +371,7 @@ config VIDEO_SAA711X
Support for the Philips SAA7113/4/5 video decoders.
To compile this driver as a module, choose M here: the
- module will be called saa7115
+ module will be called saa7115.
config VIDEO_SAA7127
tristate "Philips SAA7127/9 digital video encoders"
@@ -363,7 +380,32 @@ config VIDEO_SAA7127
Support for the Philips SAA7127/9 digital video encoders.
To compile this driver as a module, choose M here: the
- module will be called saa7127
+ module will be called saa7127.
+
+config VIDEO_UPD64031A
+ tristate "NEC Electronics uPD64031A Ghost Reduction"
+ depends on VIDEO_DEV && I2C && EXPERIMENTAL
+ ---help---
+ Support for the NEC Electronics uPD64031A Ghost Reduction
+ video chip. It is most often found in NTSC TV cards made for
+ Japan and is used to reduce the 'ghosting' effect that can
+ be present in analog TV broadcasts.
+
+ To compile this driver as a module, choose M here: the
+ module will be called upd64031a.
+
+config VIDEO_UPD64083
+ tristate "NEC Electronics uPD64083 3-Dimensional Y/C separation"
+ depends on VIDEO_DEV && I2C && EXPERIMENTAL
+ ---help---
+ Support for the NEC Electronics uPD64083 3-Dimensional Y/C
+ separation video chip. It is used to improve the quality of
+ the colors of a composite signal.
+
+ To compile this driver as a module, choose M here: the
+ module will be called upd64083.
+
+endmenu # encoder / decoder chips
#
# USB Multimedia device configuration
@@ -374,20 +416,6 @@ menu "V4L USB devices"
source "drivers/media/video/em28xx/Kconfig"
-config USB_VICAM
- tristate "USB 3com HomeConnect (aka vicam) support (EXPERIMENTAL)"
- depends on USB && VIDEO_DEV && EXPERIMENTAL
- ---help---
- Say Y here if you have 3com homeconnect camera (vicam).
-
- This driver uses the Video For Linux API. You must say Y or M to
- "Video For Linux" (under Multimedia Devices) to use this driver.
- Information on this API and pointers to "v4l" programs may be found
- at <file:Documentation/video4linux/API.html>.
-
- To compile this driver as a module, choose M here: the
- module will be called vicam.
-
config USB_DSBR
tristate "D-Link USB FM radio support (EXPERIMENTAL)"
depends on USB && VIDEO_DEV && EXPERIMENTAL
@@ -397,79 +425,20 @@ config USB_DSBR
you must connect the line out connector to a sound card or a
set of speakers.
- This driver uses the Video For Linux API. You must enable
- (Y or M in config) Video For Linux (under Character Devices)
- to use this driver. Information on this API and pointers to
- "v4l" programs may be found at
- <file:Documentation/video4linux/API.html>.
-
To compile this driver as a module, choose M here: the
module will be called dsbr100.
-config USB_ET61X251
- tristate "USB ET61X[12]51 PC Camera Controller support"
- depends on USB && VIDEO_DEV
- ---help---
- Say Y here if you want support for cameras based on Etoms ET61X151
- or ET61X251 PC Camera Controllers.
-
- See <file:Documentation/usb/et61x251.txt> for more informations.
-
- This driver uses the Video For Linux API. You must say Y or M to
- "Video For Linux" to use this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called et61x251.
+source "drivers/media/video/usbvideo/Kconfig"
-config USB_IBMCAM
- tristate "USB IBM (Xirlink) C-it Camera support"
- depends on USB && VIDEO_DEV
- ---help---
- Say Y here if you want to connect a IBM "C-It" camera, also known as
- "Xirlink PC Camera" to your computer's USB port. For more
- information, read <file:Documentation/usb/ibmcam.txt>.
-
- This driver uses the Video For Linux API. You must enable
- (Y or M in config) Video For Linux (under Character Devices)
- to use this driver. Information on this API and pointers to
- "v4l" programs may be found at
- <file:Documentation/video4linux/API.html>.
-
- To compile this driver as a module, choose M here: the
- module will be called ibmcam.
-
- This camera has several configuration options which
- can be specified when you load the module. Read
- <file:Documentation/usb/ibmcam.txt> to learn more.
-
-config USB_KONICAWC
- tristate "USB Konica Webcam support"
- depends on USB && VIDEO_DEV
- ---help---
- Say Y here if you want support for webcams based on a Konica
- chipset. This is known to work with the Intel YC76 webcam.
-
- This driver uses the Video For Linux API. You must enable
- (Y or M in config) Video For Linux (under Character Devices)
- to use this driver. Information on this API and pointers to
- "v4l" programs may be found at
- <file:Documentation/video4linux/API.html>.
-
- To compile this driver as a module, choose M here: the
- module will be called konicawc.
+source "drivers/media/video/et61x251/Kconfig"
config USB_OV511
tristate "USB OV511 Camera support"
depends on USB && VIDEO_DEV
---help---
Say Y here if you want to connect this type of camera to your
- computer's USB port. See <file:Documentation/usb/ov511.txt> for more
- information and for a list of supported cameras.
-
- This driver uses the Video For Linux API. You must say Y or M to
- "Video For Linux" (under Character Devices) to use this driver.
- Information on this API and pointers to "v4l" programs may be found
- at <file:Documentation/video4linux/API.html>.
+ computer's USB port. See <file:Documentation/video4linux/ov511.txt>
+ for more information and for a list of supported cameras.
To compile this driver as a module, choose M here: the
module will be called ov511.
@@ -479,31 +448,13 @@ config USB_SE401
depends on USB && VIDEO_DEV
---help---
Say Y here if you want to connect this type of camera to your
- computer's USB port. See <file:Documentation/usb/se401.txt> for more
- information and for a list of supported cameras.
-
- This driver uses the Video For Linux API. You must say Y or M to
- "Video For Linux" (under Multimedia Devices) to use this driver.
- Information on this API and pointers to "v4l" programs may be found
- at <file:Documentation/video4linux/API.html>.
+ computer's USB port. See <file:Documentation/video4linux/se401.txt>
+ for more information and for a list of supported cameras.
To compile this driver as a module, choose M here: the
module will be called se401.
-config USB_SN9C102
- tristate "USB SN9C10x PC Camera Controller support"
- depends on USB && VIDEO_DEV
- ---help---
- Say Y here if you want support for cameras based on SONiX SN9C101,
- SN9C102 or SN9C103 PC Camera Controllers.
-
- See <file:Documentation/usb/sn9c102.txt> for more informations.
-
- This driver uses the Video For Linux API. You must say Y or M to
- "Video For Linux" to use this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called sn9c102.
+source "drivers/media/video/sn9c102/Kconfig"
config USB_STV680
tristate "USB STV680 (Pencam) Camera support"
@@ -511,20 +462,16 @@ config USB_STV680
---help---
Say Y here if you want to connect this type of camera to your
computer's USB port. This includes the Pencam line of cameras.
- See <file:Documentation/usb/stv680.txt> for more information and for
- a list of supported cameras.
-
- This driver uses the Video For Linux API. You must say Y or M to
- "Video For Linux" (under Multimedia Devices) to use this driver.
- Information on this API and pointers to "v4l" programs may be found
- at <file:Documentation/video4linux/API.html>.
+ See <file:Documentation/video4linux/stv680.txt> for more information
+ and for a list of supported cameras.
To compile this driver as a module, choose M here: the
module will be called stv680.
config USB_W9968CF
tristate "USB W996[87]CF JPEG Dual Mode Camera support"
- depends on USB && VIDEO_DEV && I2C && VIDEO_OVCAMCHIP
+ depends on USB && VIDEO_DEV && I2C
+ select VIDEO_OVCAMCHIP
---help---
Say Y here if you want support for cameras based on OV681 or
Winbond W9967CF/W9968CF JPEG USB Dual Mode Camera Chips.
@@ -534,64 +481,14 @@ config USB_W9968CF
resolutions and framerates, but cannot be included in the official
Linux kernel for performance purposes.
- See <file:Documentation/usb/w9968cf.txt> for more informations.
-
- This driver uses the Video For Linux and the I2C APIs. It needs the
- OmniVision Camera Chip support as well. You must say Y or M to
- "Video For Linux", "I2C Support" and "OmniVision Camera Chip
- support" to use this driver.
+ See <file:Documentation/video4linux/w9968cf.txt> for more info.
To compile this driver as a module, choose M here: the
module will be called w9968cf.
-config USB_ZC0301
- tristate "USB ZC0301 Image Processor and Control Chip support"
- depends on USB && VIDEO_DEV
- ---help---
- Say Y here if you want support for cameras based on the ZC0301
- Image Processor and Control Chip.
-
- See <file:Documentation/usb/zc0301.txt> for more informations.
-
- This driver uses the Video For Linux API. You must say Y or M to
- "Video For Linux" to use this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called zc0301.
-
-config USB_PWC
- tristate "USB Philips Cameras"
- depends on USB && VIDEO_DEV
- ---help---
- Say Y or M here if you want to use one of these Philips & OEM
- webcams:
- * Philips PCA645, PCA646
- * Philips PCVC675, PCVC680, PCVC690
- * Philips PCVC720/40, PCVC730, PCVC740, PCVC750
- * Askey VC010
- * Logitech QuickCam Pro 3000, 4000, 'Zoom', 'Notebook Pro'
- and 'Orbit'/'Sphere'
- * Samsung MPC-C10, MPC-C30
- * Creative Webcam 5, Pro Ex
- * SOTEC Afina Eye
- * Visionite VCS-UC300, VCS-UM100
-
- The PCA635, PCVC665 and PCVC720/20 are not supported by this driver
- and never will be, but the 665 and 720/20 are supported by other
- drivers.
-
- See <file:Documentation/usb/philips.txt> for more information and
- installation instructions.
-
- The built-in microphone is enabled by selecting USB Audio support.
-
- This driver uses the Video For Linux API. You must say Y or M to
- "Video For Linux" (under Character Devices) to use this driver.
- Information on this API and pointers to "v4l" programs may be found
- at <file:Documentation/video4linux/API.html>.
+source "drivers/media/video/zc0301/Kconfig"
- To compile this driver as a module, choose M here: the
- module will be called pwc.
+source "drivers/media/video/pwc/Kconfig"
endmenu # V4L USB devices
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 1c0e72e5a593a..4092a5e37ffce 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_VIDEO_EM28XX) += tvp5150.o
obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o
obj-$(CONFIG_VIDEO_WM8775) += wm8775.o
+obj-$(CONFIG_VIDEO_WM8739) += wm8739.o
obj-$(CONFIG_VIDEO_OVCAMCHIP) += ovcamchip/
obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
obj-$(CONFIG_VIDEO_MXB) += saa7111.o tda9840.o tea6415c.o tea6420.o mxb.o
@@ -64,9 +65,8 @@ obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
obj-$(CONFIG_VIDEO_CX25840) += cx25840/
obj-$(CONFIG_VIDEO_SAA711X) += saa7115.o
obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
-
-et61x251-objs := et61x251_core.o et61x251_tas5130d1b.o
-zc0301-objs := zc0301_core.o zc0301_pas202bcb.o
+obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
+obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_USB_DABUSB) += dabusb.o
obj-$(CONFIG_USB_DSBR) += dsbr100.o
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 74def9c239521..423e954948beb 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -973,12 +973,12 @@ audio_mux(struct bttv *btv, int input, int mute)
For now this is sufficient. */
switch (input) {
case TVAUDIO_INPUT_RADIO:
- route.input = MSP_INPUT(MSP_IN_SCART_2, MSP_IN_TUNER_1,
- MSP_DSP_OUT_SCART, MSP_DSP_OUT_SCART);
+ route.input = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
+ MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
break;
case TVAUDIO_INPUT_EXTERN:
- route.input = MSP_INPUT(MSP_IN_SCART_1, MSP_IN_TUNER_1,
- MSP_DSP_OUT_SCART, MSP_DSP_OUT_SCART);
+ route.input = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1,
+ MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
break;
case TVAUDIO_INPUT_INTERN:
/* Yes, this is the same input as for RADIO. I doubt
@@ -986,8 +986,8 @@ audio_mux(struct bttv *btv, int input, int mute)
input is the BTTV_BOARD_AVERMEDIA98. I wonder how
that was tested. My guess is that the whole INTERN
input does not work. */
- route.input = MSP_INPUT(MSP_IN_SCART_2, MSP_IN_TUNER_1,
- MSP_DSP_OUT_SCART, MSP_DSP_OUT_SCART);
+ route.input = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
+ MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
break;
case TVAUDIO_INPUT_TUNER:
default:
@@ -1023,14 +1023,11 @@ audio_input(struct bttv *btv, int input)
static void
i2c_vidiocschan(struct bttv *btv)
{
- struct video_channel c;
+ v4l2_std_id std = bttv_tvnorms[btv->tvnorm].v4l2_id;
- memset(&c,0,sizeof(c));
- c.norm = btv->tvnorm;
- c.channel = btv->input;
- bttv_call_i2c_clients(btv,VIDIOCSCHAN,&c);
+ bttv_call_i2c_clients(btv, VIDIOC_S_STD, &std);
if (btv->c.type == BTTV_BOARD_VOODOOTV_FM)
- bttv_tda9880_setnorm(btv,c.norm);
+ bttv_tda9880_setnorm(btv,btv->tvnorm);
}
static int
@@ -1184,11 +1181,27 @@ static int get_control(struct bttv *btv, struct v4l2_control *c)
break;
if (i == BTTV_CTLS)
return -EINVAL;
- if (i >= 4 && i <= 8) {
+ if (btv->audio_hook && i >= 4 && i <= 8) {
memset(&va,0,sizeof(va));
- bttv_call_i2c_clients(btv, VIDIOCGAUDIO, &va);
- if (btv->audio_hook)
- btv->audio_hook(btv,&va,0);
+ btv->audio_hook(btv,&va,0);
+ switch (c->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ c->value = (VIDEO_AUDIO_MUTE & va.flags) ? 1 : 0;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ c->value = va.volume;
+ break;
+ case V4L2_CID_AUDIO_BALANCE:
+ c->value = va.balance;
+ break;
+ case V4L2_CID_AUDIO_BASS:
+ c->value = va.bass;
+ break;
+ case V4L2_CID_AUDIO_TREBLE:
+ c->value = va.treble;
+ break;
+ }
+ return 0;
}
switch (c->id) {
case V4L2_CID_BRIGHTNESS:
@@ -1205,19 +1218,11 @@ static int get_control(struct bttv *btv, struct v4l2_control *c)
break;
case V4L2_CID_AUDIO_MUTE:
- c->value = (VIDEO_AUDIO_MUTE & va.flags) ? 1 : 0;
- break;
case V4L2_CID_AUDIO_VOLUME:
- c->value = va.volume;
- break;
case V4L2_CID_AUDIO_BALANCE:
- c->value = va.balance;
- break;
case V4L2_CID_AUDIO_BASS:
- c->value = va.bass;
- break;
case V4L2_CID_AUDIO_TREBLE:
- c->value = va.treble;
+ bttv_call_i2c_clients(btv,VIDIOC_G_CTRL,c);
break;
case V4L2_CID_PRIVATE_CHROMA_AGC:
@@ -1269,11 +1274,35 @@ static int set_control(struct bttv *btv, struct v4l2_control *c)
break;
if (i == BTTV_CTLS)
return -EINVAL;
- if (i >= 4 && i <= 8) {
+ if (btv->audio_hook && i >= 4 && i <= 8) {
memset(&va,0,sizeof(va));
- bttv_call_i2c_clients(btv, VIDIOCGAUDIO, &va);
- if (btv->audio_hook)
- btv->audio_hook(btv,&va,0);
+ btv->audio_hook(btv,&va,0);
+ switch (c->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ if (c->value) {
+ va.flags |= VIDEO_AUDIO_MUTE;
+ audio_mute(btv, 1);
+ } else {
+ va.flags &= ~VIDEO_AUDIO_MUTE;
+ audio_mute(btv, 0);
+ }
+ break;
+
+ case V4L2_CID_AUDIO_VOLUME:
+ va.volume = c->value;
+ break;
+ case V4L2_CID_AUDIO_BALANCE:
+ va.balance = c->value;
+ break;
+ case V4L2_CID_AUDIO_BASS:
+ va.bass = c->value;
+ break;
+ case V4L2_CID_AUDIO_TREBLE:
+ va.treble = c->value;
+ break;
+ }
+ btv->audio_hook(btv,&va,1);
+ return 0;
}
switch (c->id) {
case V4L2_CID_BRIGHTNESS:
@@ -1289,26 +1318,13 @@ static int set_control(struct bttv *btv, struct v4l2_control *c)
bt848_sat(btv,c->value);
break;
case V4L2_CID_AUDIO_MUTE:
- if (c->value) {
- va.flags |= VIDEO_AUDIO_MUTE;
- audio_mute(btv, 1);
- } else {
- va.flags &= ~VIDEO_AUDIO_MUTE;
- audio_mute(btv, 0);
- }
- break;
-
+ audio_mute(btv, c->value);
+ /* fall through */
case V4L2_CID_AUDIO_VOLUME:
- va.volume = c->value;
- break;
case V4L2_CID_AUDIO_BALANCE:
- va.balance = c->value;
- break;
case V4L2_CID_AUDIO_BASS:
- va.bass = c->value;
- break;
case V4L2_CID_AUDIO_TREBLE:
- va.treble = c->value;
+ bttv_call_i2c_clients(btv,VIDIOC_S_CTRL,c);
break;
case V4L2_CID_PRIVATE_CHROMA_AGC:
@@ -1364,11 +1380,6 @@ static int set_control(struct bttv *btv, struct v4l2_control *c)
default:
return -EINVAL;
}
- if (i >= 4 && i <= 8) {
- bttv_call_i2c_clients(btv, VIDIOCSAUDIO, &va);
- if (btv->audio_hook)
- btv->audio_hook(btv,&va,1);
- }
return 0;
}
@@ -1591,12 +1602,16 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
}
case VIDIOCSFREQ:
{
- unsigned long *freq = arg;
+ struct v4l2_frequency freq;
+
+ memset(&freq, 0, sizeof(freq));
+ freq.frequency = *(unsigned long *)arg;
mutex_lock(&btv->lock);
- btv->freq=*freq;
- bttv_call_i2c_clients(btv,VIDIOCSFREQ,freq);
+ freq.type = btv->radio_user ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ btv->freq = *(unsigned long *)arg;
+ bttv_call_i2c_clients(btv,VIDIOC_S_FREQUENCY,&freq);
if (btv->has_matchbox && btv->radio_user)
- tea5757_set_freq(btv,*freq);
+ tea5757_set_freq(btv,*(unsigned long *)arg);
mutex_unlock(&btv->lock);
return 0;
}
@@ -1827,33 +1842,26 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
return -EINVAL;
mutex_lock(&btv->lock);
memset(t,0,sizeof(*t));
+ t->rxsubchans = V4L2_TUNER_SUB_MONO;
+ bttv_call_i2c_clients(btv, VIDIOC_G_TUNER, t);
strcpy(t->name, "Television");
- t->type = V4L2_TUNER_ANALOG_TV;
t->capability = V4L2_TUNER_CAP_NORM;
- t->rxsubchans = V4L2_TUNER_SUB_MONO;
+ t->type = V4L2_TUNER_ANALOG_TV;
if (btread(BT848_DSTATUS)&BT848_DSTATUS_HLOC)
t->signal = 0xffff;
- {
- struct video_tuner tuner;
-
- memset(&tuner, 0, sizeof (tuner));
- tuner.rangehigh = 0xffffffffUL;
- bttv_call_i2c_clients(btv, VIDIOCGTUNER, &tuner);
- t->rangelow = tuner.rangelow;
- t->rangehigh = tuner.rangehigh;
- }
- {
+
+ if (btv->audio_hook) {
/* Hmmm ... */
struct video_audio va;
memset(&va, 0, sizeof(struct video_audio));
- bttv_call_i2c_clients(btv, VIDIOCGAUDIO, &va);
- if (btv->audio_hook)
- btv->audio_hook(btv,&va,0);
+ btv->audio_hook(btv,&va,0);
+ t->audmode = V4L2_TUNER_MODE_MONO;
+ t->rxsubchans = V4L2_TUNER_SUB_MONO;
if(va.mode & VIDEO_SOUND_STEREO) {
- t->audmode = V4L2_TUNER_MODE_STEREO;
- t->rxsubchans |= V4L2_TUNER_SUB_STEREO;
+ t->audmode = V4L2_TUNER_MODE_STEREO;
+ t->rxsubchans = V4L2_TUNER_SUB_STEREO;
}
- if(va.mode & VIDEO_SOUND_LANG1) {
+ if(va.mode & VIDEO_SOUND_LANG2) {
t->audmode = V4L2_TUNER_MODE_LANG1;
t->rxsubchans = V4L2_TUNER_SUB_LANG1
| V4L2_TUNER_SUB_LANG2;
@@ -1872,10 +1880,10 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
if (0 != t->index)
return -EINVAL;
mutex_lock(&btv->lock);
- {
+ bttv_call_i2c_clients(btv, VIDIOC_S_TUNER, t);
+ if (btv->audio_hook) {
struct video_audio va;
memset(&va, 0, sizeof(struct video_audio));
- bttv_call_i2c_clients(btv, VIDIOCGAUDIO, &va);
if (t->audmode == V4L2_TUNER_MODE_MONO)
va.mode = VIDEO_SOUND_MONO;
else if (t->audmode == V4L2_TUNER_MODE_STEREO ||
@@ -1885,9 +1893,7 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
va.mode = VIDEO_SOUND_LANG1;
else if (t->audmode == V4L2_TUNER_MODE_LANG2)
va.mode = VIDEO_SOUND_LANG2;
- bttv_call_i2c_clients(btv, VIDIOCSAUDIO, &va);
- if (btv->audio_hook)
- btv->audio_hook(btv,&va,1);
+ btv->audio_hook(btv,&va,1);
}
mutex_unlock(&btv->lock);
return 0;
@@ -1912,7 +1918,7 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
return -EINVAL;
mutex_lock(&btv->lock);
btv->freq = f->frequency;
- bttv_call_i2c_clients(btv,VIDIOCSFREQ,&btv->freq);
+ bttv_call_i2c_clients(btv,VIDIOC_S_FREQUENCY,f);
if (btv->has_matchbox && btv->radio_user)
tea5757_set_freq(btv,btv->freq);
mutex_unlock(&btv->lock);
@@ -1920,7 +1926,9 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
}
case VIDIOC_LOG_STATUS:
{
+ printk(KERN_INFO "bttv%d: ================= START STATUS CARD #%d =================\n", btv->c.nr, btv->c.nr);
bttv_call_i2c_clients(btv, VIDIOC_LOG_STATUS, NULL);
+ printk(KERN_INFO "bttv%d: ================== END STATUS CARD #%d ==================\n", btv->c.nr, btv->c.nr);
return 0;
}
@@ -2870,12 +2878,10 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
return 0;
}
*c = bttv_ctls[i];
- if (i >= 4 && i <= 8) {
+ if (btv->audio_hook && i >= 4 && i <= 8) {
struct video_audio va;
memset(&va,0,sizeof(va));
- bttv_call_i2c_clients(btv, VIDIOCGAUDIO, &va);
- if (btv->audio_hook)
- btv->audio_hook(btv,&va,0);
+ btv->audio_hook(btv,&va,0);
switch (bttv_ctls[i].id) {
case V4L2_CID_AUDIO_VOLUME:
if (!(va.flags & VIDEO_AUDIO_VOLUME))
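Editor's note: the bttv hunks split audio-control handling into two paths: boards with an audio_hook translate the V4L2 control into the old video_audio struct and handle it locally, while everything else forwards the V4L2 control straight to the attached i2c client drivers. A compact, hypothetical version of that dispatch with the types reduced to the essentials (not the bttv code):

/* Illustrative two-path control dispatch, loosely modelled on the change above. */
struct demo_ctrl {
	unsigned int id;
	int value;
};

struct demo_btv {
	void (*audio_hook)(struct demo_btv *btv, struct demo_ctrl *c, int set);
	void (*call_i2c_clients)(struct demo_btv *btv, struct demo_ctrl *c);
};

static int demo_set_audio_ctrl(struct demo_btv *btv, struct demo_ctrl *c)
{
	if (btv->audio_hook) {
		/* board-specific hook owns the audio path */
		btv->audio_hook(btv, c, 1);
		return 0;
	}
	/* otherwise let the attached i2c decoder (e.g. msp3400) handle it */
	btv->call_i2c_clients(btv, c);
	return 0;
}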
diff --git a/drivers/media/video/bt8xx/bttv-vbi.c b/drivers/media/video/bt8xx/bttv-vbi.c
index e20ff238e4092..8c9f0f7cf4677 100644
--- a/drivers/media/video/bt8xx/bttv-vbi.c
+++ b/drivers/media/video/bt8xx/bttv-vbi.c
@@ -184,7 +184,7 @@ void bttv_vbi_try_fmt(struct bttv_fh *fh, struct v4l2_format *f)
- tvnorm->vbistart[0];
count1 = (s64) f->fmt.vbi.start[1] + f->fmt.vbi.count[1]
- tvnorm->vbistart[1];
- count = clamp (max (count0, count1), 1LL, (s64) VBI_MAXLINES);
+ count = clamp (max (count0, count1), (s64) 1, (s64) VBI_MAXLINES);
f->fmt.vbi.start[0] = tvnorm->vbistart[0];
f->fmt.vbi.start[1] = tvnorm->vbistart[1];
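Editor's note: the bttv-vbi one-liner appears to be about the kernel's type-checked min/max macros: s64 is long long on some architectures and long on others, so a 1LL literal does not always match the type of an s64 expression and the macro's compile-time check complains; casting every argument to s64 keeps them uniform. The check itself can be sketched in plain GCC C (this mirrors the idea, not the exact kernel macro):

#include <stdio.h>

/* Pointer-comparison trick: compile-time warning if x and y differ in type. */
#define DEMO_MAX(x, y) ({			\
	__typeof__(x) _x = (x);			\
	__typeof__(y) _y = (y);			\
	(void)(&_x == &_y);			\
	_x > _y ? _x : _y; })

int main(void)
{
	long long a = 5;

	/* Same type on both sides: no warning from the pointer comparison. */
	printf("%lld\n", DEMO_MAX(a, (long long)1));
	return 0;
}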
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 2227c5640c12c..85d84e89d8f4d 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -64,14 +64,13 @@ MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("video");
#endif
-static unsigned short colorspace_conv = 0;
+static unsigned short colorspace_conv;
module_param(colorspace_conv, ushort, 0444);
MODULE_PARM_DESC(colorspace_conv,
- "\n<n> Colorspace conversion:"
- "\n0 = disable"
- "\n1 = enable"
- "\nDefault value is 0"
- "\n");
+ " Colorspace conversion:"
+ "\n 0 = disable, 1 = enable"
+ "\n Default value is 0"
+ );
#define ABOUT "V4L-Driver for Vision CPiA based cameras"
@@ -4042,7 +4041,7 @@ static int __init cpia_init(void)
"allowed, it is disabled by default now. Users should fix the "
"applications in case they don't work without conversion "
"reenabled by setting the 'colorspace_conv' module "
- "parameter to 1");
+ "parameter to 1\n");
#ifdef CONFIG_PROC_FS
proc_cpia_create();
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index 8394283993f62..1764991b0ac98 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -456,7 +456,7 @@ int cpia2_init_camera(struct camera_data *cam);
int cpia2_allocate_buffers(struct camera_data *cam);
void cpia2_free_buffers(struct camera_data *cam);
long cpia2_read(struct camera_data *cam,
- char *buf, unsigned long count, int noblock);
+ char __user *buf, unsigned long count, int noblock);
unsigned int cpia2_poll(struct camera_data *cam,
struct file *filp, poll_table *wait);
int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma);
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c
index 3021f21aae36c..0b00e6027dfb3 100644
--- a/drivers/media/video/cpia_pp.c
+++ b/drivers/media/video/cpia_pp.c
@@ -873,7 +873,7 @@ static int __init cpia_pp_setup(char *str)
parport_nr[parport_ptr++] = PPCPIA_PARPORT_NONE;
}
- return 0;
+ return 1;
}
__setup("cpia_pp=", cpia_pp_setup);
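Editor's note: the cpia_pp change flips the __setup handler's return value. For kernel-command-line handlers registered with __setup(), returning 1 tells the option parser the argument was consumed, while 0 lets it fall through as an unknown option. A minimal hypothetical handler of the same shape (the option name is made up):

#include <linux/init.h>

/* Sketch of an __setup() handler; "demo_opt=" is illustrative only. */
static int __init demo_setup(char *str)
{
	/* ...parse str here... */
	return 1;       /* 1 = option consumed; 0 would pass it on as unknown */
}
__setup("demo_opt=", demo_setup);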
diff --git a/drivers/media/video/cx25840/cx25840-audio.c b/drivers/media/video/cx25840/cx25840-audio.c
index a4540e858f213..9a4b813152e56 100644
--- a/drivers/media/video/cx25840/cx25840-audio.c
+++ b/drivers/media/video/cx25840/cx25840-audio.c
@@ -19,8 +19,9 @@
#include <linux/videodev2.h>
#include <linux/i2c.h>
#include <media/v4l2-common.h>
+#include <media/cx25840.h>
-#include "cx25840.h"
+#include "cx25840-core.h"
static int set_audclk_freq(struct i2c_client *client, u32 freq)
{
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index a65b3cc4bf03f..a961bb2ab0fdc 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -32,8 +32,9 @@
#include <linux/videodev2.h>
#include <linux/i2c.h>
#include <media/v4l2-common.h>
+#include <media/cx25840.h>
-#include "cx25840.h"
+#include "cx25840-core.h"
MODULE_DESCRIPTION("Conexant CX25840 audio/video decoder driver");
MODULE_AUTHOR("Ulf Eklund, Chris Kennedy, Hans Verkuil, Tyler Trafford");
@@ -668,6 +669,7 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
{
struct cx25840_state *state = i2c_get_clientdata(client);
struct v4l2_tuner *vt = arg;
+ struct v4l2_routing *route = arg;
switch (cmd) {
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -749,19 +751,21 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
state->radio = 1;
break;
- case VIDIOC_G_INPUT:
- *(int *)arg = state->vid_input;
+ case VIDIOC_INT_G_VIDEO_ROUTING:
+ route->input = state->vid_input;
+ route->output = 0;
break;
- case VIDIOC_S_INPUT:
- return set_input(client, *(enum cx25840_video_input *)arg, state->aud_input);
+ case VIDIOC_INT_S_VIDEO_ROUTING:
+ return set_input(client, route->input, state->aud_input);
- case VIDIOC_S_AUDIO:
- {
- struct v4l2_audio *input = arg;
+ case VIDIOC_INT_G_AUDIO_ROUTING:
+ route->input = state->aud_input;
+ route->output = 0;
+ break;
- return set_input(client, state->vid_input, input->index);
- }
+ case VIDIOC_INT_S_AUDIO_ROUTING:
+ return set_input(client, state->vid_input, route->input);
case VIDIOC_S_FREQUENCY:
input_change(client);
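Editor's note: cx25840 now answers the internal routing ioctls with a structure that carries both an input and an output selector, instead of overloading the user-visible G_INPUT/S_INPUT and S_AUDIO calls. A rough sketch of the shape of that interface (the struct mirrors the v4l2_routing idea from the hunk, the handlers are invented):

/* Illustrative routing handlers, not the cx25840 driver. */
struct demo_routing {
	unsigned int input;
	unsigned int output;
};

struct demo_state {
	unsigned int vid_input;
	unsigned int aud_input;
};

static int demo_get_video_routing(struct demo_state *state,
				  struct demo_routing *route)
{
	route->input = state->vid_input;
	route->output = 0;              /* this decoder has a single output */
	return 0;
}

static int demo_set_video_routing(struct demo_state *state,
				  const struct demo_routing *route)
{
	state->vid_input = route->input;
	/* ...program the hardware input mux here... */
	return 0;
}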
diff --git a/drivers/media/video/cx25840/cx25840.h b/drivers/media/video/cx25840/cx25840-core.h
index dd70664d1dd9b..1736929fc2049 100644
--- a/drivers/media/video/cx25840/cx25840.h
+++ b/drivers/media/video/cx25840/cx25840-core.h
@@ -1,4 +1,4 @@
-/* cx25840 API header
+/* cx25840 internal API header
*
* Copyright (C) 2003-2004 Chris Kennedy
*
@@ -17,8 +17,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
-#ifndef _CX25840_H_
-#define _CX25840_H_
+#ifndef _CX25840_CORE_H_
+#define _CX25840_CORE_H_
#include <linux/videodev2.h>
@@ -32,46 +32,6 @@
providing this information. */
#define CX25840_CID_ENABLE_PVR150_WORKAROUND (V4L2_CID_PRIVATE_BASE+0)
-enum cx25840_video_input {
- /* Composite video inputs In1-In8 */
- CX25840_COMPOSITE1 = 1,
- CX25840_COMPOSITE2,
- CX25840_COMPOSITE3,
- CX25840_COMPOSITE4,
- CX25840_COMPOSITE5,
- CX25840_COMPOSITE6,
- CX25840_COMPOSITE7,
- CX25840_COMPOSITE8,
-
- /* S-Video inputs consist of one luma input (In1-In4) ORed with one
- chroma input (In5-In8) */
- CX25840_SVIDEO_LUMA1 = 0x10,
- CX25840_SVIDEO_LUMA2 = 0x20,
- CX25840_SVIDEO_LUMA3 = 0x30,
- CX25840_SVIDEO_LUMA4 = 0x40,
- CX25840_SVIDEO_CHROMA4 = 0x400,
- CX25840_SVIDEO_CHROMA5 = 0x500,
- CX25840_SVIDEO_CHROMA6 = 0x600,
- CX25840_SVIDEO_CHROMA7 = 0x700,
- CX25840_SVIDEO_CHROMA8 = 0x800,
-
- /* S-Video aliases for common luma/chroma combinations */
- CX25840_SVIDEO1 = 0x510,
- CX25840_SVIDEO2 = 0x620,
- CX25840_SVIDEO3 = 0x730,
- CX25840_SVIDEO4 = 0x840,
-};
-
-enum cx25840_audio_input {
- /* Audio inputs: serial or In4-In8 */
- CX25840_AUDIO_SERIAL,
- CX25840_AUDIO4 = 4,
- CX25840_AUDIO5,
- CX25840_AUDIO6,
- CX25840_AUDIO7,
- CX25840_AUDIO8,
-};
-
struct cx25840_state {
int pvr150_workaround;
int radio;
diff --git a/drivers/media/video/cx25840/cx25840-firmware.c b/drivers/media/video/cx25840/cx25840-firmware.c
index e1a7823d82cde..f59ced181c553 100644
--- a/drivers/media/video/cx25840/cx25840-firmware.c
+++ b/drivers/media/video/cx25840/cx25840-firmware.c
@@ -20,11 +20,22 @@
#include <linux/i2c-algo-bit.h>
#include <linux/firmware.h>
#include <media/v4l2-common.h>
+#include <media/cx25840.h>
-#include "cx25840.h"
+#include "cx25840-core.h"
#define FWFILE "v4l-cx25840.fw"
-#define FWSEND 1024
+
+/*
+ * Mike Isely <isely@pobox.com> - The FWSEND parameter controls the
+ * size of the firmware chunks sent down the I2C bus to the chip.
+ * Previously this had been set to 1024 but unfortunately some I2C
+ * implementations can't transfer data in such big gulps.
+ * Specifically, the pvrusb2 driver has a hard limit of around 60
+ * bytes, due to the encapsulation there of I2C traffic into USB
+ * messages. So we have to significantly reduce this parameter.
+ */
+#define FWSEND 48
#define FWDEV(x) &((x)->adapter->dev)
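Editor's note: the FWSEND change caps each I2C write at 48 bytes so the firmware download also works through adapters that cannot take 1 KB writes (the new comment cites pvrusb2's roughly 60-byte limit). Chunked upload is simply a loop over the blob; a generic, hypothetical version:

#include <stddef.h>

#define DEMO_CHUNK 48   /* keep each transfer under the smallest adapter limit */

/* send() is a stand-in for the bus write; returns bytes written or <0 on error. */
static int demo_upload(int (*send)(const unsigned char *buf, size_t len),
		       const unsigned char *fw, size_t size)
{
	size_t off = 0;

	while (off < size) {
		size_t len = size - off;

		if (len > DEMO_CHUNK)
			len = DEMO_CHUNK;
		if (send(fw + off, len) < 0)
			return -1;      /* abort on the first failed chunk */
		off += len;
	}
	return 0;
}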
diff --git a/drivers/media/video/cx25840/cx25840-vbi.c b/drivers/media/video/cx25840/cx25840-vbi.c
index e96fd1f1d6dc3..57feca288d2ba 100644
--- a/drivers/media/video/cx25840/cx25840-vbi.c
+++ b/drivers/media/video/cx25840/cx25840-vbi.c
@@ -19,8 +19,9 @@
#include <linux/videodev2.h>
#include <linux/i2c.h>
#include <media/v4l2-common.h>
+#include <media/cx25840.h>
-#include "cx25840.h"
+#include "cx25840-core.h"
static int odd_parity(u8 c)
{
@@ -151,7 +152,7 @@ int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg)
case VIDIOC_G_FMT:
{
static u16 lcr2vbi[] = {
- 0, V4L2_SLICED_TELETEXT_PAL_B, 0, /* 1 */
+ 0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
0, V4L2_SLICED_WSS_625, 0, /* 4 */
V4L2_SLICED_CAPTION_525, /* 6 */
0, 0, V4L2_SLICED_VPS, 0, 0, /* 9 */
@@ -231,7 +232,7 @@ int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg)
for (i = 7; i <= 23; i++) {
for (x = 0; x <= 1; x++) {
switch (svbi->service_lines[1-x][i]) {
- case V4L2_SLICED_TELETEXT_PAL_B:
+ case V4L2_SLICED_TELETEXT_B:
lcr[i] |= 1 << (4 * x);
break;
case V4L2_SLICED_WSS_625:
@@ -282,7 +283,7 @@ int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg)
switch (id2) {
case 1:
- id2 = V4L2_SLICED_TELETEXT_PAL_B;
+ id2 = V4L2_SLICED_TELETEXT_B;
break;
case 4:
id2 = V4L2_SLICED_WSS_625;
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index ff0f72340d691..630273992a419 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -1,3 +1,7 @@
+config VIDEO_CX88_VP3054
+ tristate
+ depends on VIDEO_CX88_DVB && DVB_MT352
+
config VIDEO_CX88
tristate "Conexant 2388x (bt878 successor) support"
depends on VIDEO_DEV && PCI && I2C
@@ -25,7 +29,7 @@ config VIDEO_CX88_ALSA
It only works with boards with function 01 enabled.
To check if your board supports, use lspci -n.
- If supported, you should see 1471:8801 or 1471:8811
+ If supported, you should see 14f1:8801 or 14f1:8811
PCI device.
To compile this driver as a module, choose M here: the
@@ -73,10 +77,11 @@ config VIDEO_CX88_DVB_MT352
This adds DVB-T support for cards based on the
Connexant 2388x chip and the MT352 demodulator.
-config VIDEO_CX88_VP3054
- tristate "VP-3054 Secondary I2C Bus Support"
- default m
- depends on DVB_MT352
+config VIDEO_CX88_DVB_VP3054
+ bool "VP-3054 Secondary I2C Bus Support"
+ default y
+ depends on VIDEO_CX88_DVB_MT352
+ select VIDEO_CX88_VP3054
---help---
This adds DVB-T support for cards based on the
Connexant 2388x chip and the MT352 demodulator,
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index f62fd706b45a0..3ba3439db580f 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -151,8 +151,8 @@ struct em28xx_board em28xx_boards[] = {
},{
.type = EM28XX_VMUX_SVIDEO,
.vmux = 2,
- .amux = MSP_INPUT(MSP_IN_SCART_1, MSP_IN_TUNER_1,
- MSP_DSP_OUT_SCART, MSP_DSP_OUT_SCART),
+ .amux = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1,
+ MSP_DSP_IN_SCART, MSP_DSP_IN_SCART),
}},
},
[EM2820_BOARD_MSI_VOX_USB_2] = {
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index dfba33d0fa617..ddc92cbb5276b 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -222,7 +222,7 @@ static void video_mux(struct em28xx *dev, int index)
if (dev->i2s_speed)
em28xx_i2c_call_clients(dev, VIDIOC_INT_I2S_CLOCK_FREQ, &dev->i2s_speed);
route.input = dev->ctl_ainput;
- route.output = MSP_OUTPUT(MSP_OUT_SCART1_DA);
+ route.output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1);
/* Note: this is msp3400 specific */
em28xx_i2c_call_clients(dev, VIDIOC_INT_S_AUDIO_ROUTING, &route);
ainput = EM28XX_AUDIO_SRC_TUNER;
@@ -1141,26 +1141,16 @@ static int em28xx_do_ioctl(struct inode *inode, struct file *filp,
case VIDIOC_G_TUNER:
{
struct v4l2_tuner *t = arg;
- int status = 0;
if (0 != t->index)
return -EINVAL;
memset(t, 0, sizeof(*t));
strcpy(t->name, "Tuner");
- t->type = V4L2_TUNER_ANALOG_TV;
- t->capability = V4L2_TUNER_CAP_NORM;
- t->rangehigh = 0xffffffffUL; /* FIXME: set correct range */
-/* t->signal = 0xffff;*/
-/* em28xx_i2c_call_clients(dev,VIDIOC_G_TUNER,t);*/
- /* No way to get signal strength? */
mutex_lock(&dev->lock);
- em28xx_i2c_call_clients(dev, DECODER_GET_STATUS,
- &status);
+ /* let clients fill in the remainder of this struct */
+ em28xx_i2c_call_clients(dev, cmd, t);
mutex_unlock(&dev->lock);
- t->signal =
- (status & DECODER_STATUS_GOOD) != 0 ? 0xffff : 0;
-
em28xx_videodbg("VIDIO_G_TUNER: signal=%x, afc=%x\n", t->signal,
t->afc);
return 0;
@@ -1168,26 +1158,13 @@ static int em28xx_do_ioctl(struct inode *inode, struct file *filp,
case VIDIOC_S_TUNER:
{
struct v4l2_tuner *t = arg;
- int status = 0;
if (0 != t->index)
return -EINVAL;
- memset(t, 0, sizeof(*t));
- strcpy(t->name, "Tuner");
- t->type = V4L2_TUNER_ANALOG_TV;
- t->capability = V4L2_TUNER_CAP_NORM;
- t->rangehigh = 0xffffffffUL; /* FIXME: set correct range */
-/* t->signal = 0xffff; */
- /* No way to get signal strength? */
mutex_lock(&dev->lock);
- em28xx_i2c_call_clients(dev, DECODER_GET_STATUS,
- &status);
+ /* let clients handle this */
+ em28xx_i2c_call_clients(dev, cmd, t);
mutex_unlock(&dev->lock);
- t->signal =
- (status & DECODER_STATUS_GOOD) != 0 ? 0xffff : 0;
-
- em28xx_videodbg("VIDIO_S_TUNER: signal=%x, afc=%x\n",
- t->signal, t->afc);
return 0;
}
case VIDIOC_G_FREQUENCY:
diff --git a/drivers/media/video/et61x251/Kconfig b/drivers/media/video/et61x251/Kconfig
new file mode 100644
index 0000000000000..6c43a90c6569f
--- /dev/null
+++ b/drivers/media/video/et61x251/Kconfig
@@ -0,0 +1,14 @@
+config USB_ET61X251
+ tristate "USB ET61X[12]51 PC Camera Controller support"
+ depends on USB && VIDEO_DEV
+ ---help---
+ Say Y here if you want support for cameras based on Etoms ET61X151
+ or ET61X251 PC Camera Controllers.
+
+ See <file:Documentation/video4linux/et61x251.txt> for more info.
+
+ This driver uses the Video For Linux API. You must say Y or M to
+ "Video For Linux" to use this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called et61x251.
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 95bacf435414e..7e66d83fe0ce6 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -411,6 +411,9 @@ static int ir_probe(struct i2c_adapter *adap)
case I2C_HW_B_BT848:
probe = probe_bttv;
break;
+ case I2C_HW_B_CX2341X:
+ probe = probe_bttv;
+ break;
case I2C_HW_SAA7134:
probe = probe_saa7134;
break;
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index c40e8ba9a2ea8..b806999d6e0fc 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -279,20 +279,8 @@ void msp_set_scart(struct i2c_client *client, int in, int out)
msp_write_dsp(client, 0x13, state->acb);
/* Sets I2S speed 0 = 1.024 Mbps, 1 = 2.048 Mbps */
- msp_write_dem(client, 0x40, state->i2s_mode);
-}
-
-void msp_set_mute(struct i2c_client *client)
-{
- struct msp_state *state = i2c_get_clientdata(client);
-
- v4l_dbg(1, msp_debug, client, "mute audio\n");
- msp_write_dsp(client, 0x0000, 0);
- msp_write_dsp(client, 0x0007, 1);
- if (state->has_scart2_out_volume)
- msp_write_dsp(client, 0x0040, 1);
- if (state->has_headphones)
- msp_write_dsp(client, 0x0006, 0);
+ if (state->has_i2s_conf)
+ msp_write_dem(client, 0x40, state->i2s_mode);
}
void msp_set_audio(struct i2c_client *client)
@@ -300,17 +288,19 @@ void msp_set_audio(struct i2c_client *client)
struct msp_state *state = i2c_get_clientdata(client);
int bal = 0, bass, treble, loudness;
int val = 0;
+ int reallymuted = state->muted | state->scan_in_progress;
- if (!state->muted)
+ if (!reallymuted)
val = (state->volume * 0x7f / 65535) << 8;
- v4l_dbg(1, msp_debug, client, "mute=%s volume=%d\n",
- state->muted ? "on" : "off", state->volume);
+ v4l_dbg(1, msp_debug, client, "mute=%s scanning=%s volume=%d\n",
+ state->muted ? "on" : "off", state->scan_in_progress ? "yes" : "no",
+ state->volume);
msp_write_dsp(client, 0x0000, val);
- msp_write_dsp(client, 0x0007, state->muted ? 0x1 : (val | 0x1));
+ msp_write_dsp(client, 0x0007, reallymuted ? 0x1 : (val | 0x1));
if (state->has_scart2_out_volume)
- msp_write_dsp(client, 0x0040, state->muted ? 0x1 : (val | 0x1));
+ msp_write_dsp(client, 0x0040, reallymuted ? 0x1 : (val | 0x1));
if (state->has_headphones)
msp_write_dsp(client, 0x0006, val);
if (!state->has_sound_processing)
@@ -346,7 +336,6 @@ static void msp_wake_thread(struct i2c_client *client)
if (NULL == state->kthread)
return;
- msp_set_mute(client);
state->watch_stereo = 0;
state->restart = 1;
wake_up_interruptible(&state->wq);
@@ -374,19 +363,15 @@ int msp_sleep(struct msp_state *state, int timeout)
/* ------------------------------------------------------------------------ */
-static int msp_mode_v4l2_to_v4l1(int rxsubchans)
+static int msp_mode_v4l2_to_v4l1(int rxsubchans, int audmode)
{
- int mode = 0;
-
- if (rxsubchans & V4L2_TUNER_SUB_STEREO)
- mode |= VIDEO_SOUND_STEREO;
- if (rxsubchans & V4L2_TUNER_SUB_LANG2)
- mode |= VIDEO_SOUND_LANG2 | VIDEO_SOUND_STEREO;
- if (rxsubchans & V4L2_TUNER_SUB_LANG1)
- mode |= VIDEO_SOUND_LANG1 | VIDEO_SOUND_STEREO;
- if (mode == 0)
- mode |= VIDEO_SOUND_MONO;
- return mode;
+ if (rxsubchans == V4L2_TUNER_SUB_MONO)
+ return VIDEO_SOUND_MONO;
+ if (rxsubchans == V4L2_TUNER_SUB_STEREO)
+ return VIDEO_SOUND_STEREO;
+ if (audmode == V4L2_TUNER_MODE_LANG2)
+ return VIDEO_SOUND_LANG2;
+ return VIDEO_SOUND_LANG1;
}
static int msp_mode_v4l1_to_v4l2(int mode)
@@ -605,7 +590,7 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
break;
if (state->opmode == OPMODE_AUTOSELECT)
msp_detect_stereo(client);
- va->mode = msp_mode_v4l2_to_v4l1(state->rxsubchans);
+ va->mode = msp_mode_v4l2_to_v4l1(state->rxsubchans, state->audmode);
break;
}
@@ -620,7 +605,8 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
state->treble = va->treble;
msp_set_audio(client);
- if (va->mode != 0 && state->radio == 0) {
+ if (va->mode != 0 && state->radio == 0 &&
+ state->audmode != msp_mode_v4l1_to_v4l2(va->mode)) {
state->audmode = msp_mode_v4l1_to_v4l2(va->mode);
msp_set_audmode(client);
}
@@ -687,21 +673,23 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
int sc_in = rt->input & 0x7;
int sc1_out = rt->output & 0xf;
int sc2_out = (rt->output >> 4) & 0xf;
- u16 val;
+ u16 val, reg;
+ if (state->routing.input == rt->input &&
+ state->routing.output == rt->output)
+ break;
state->routing = *rt;
- if (state->opmode == OPMODE_AUTOSELECT) {
- val = msp_read_dem(client, 0x30) & ~0x100;
- msp_write_dem(client, 0x30, val | (tuner ? 0x100 : 0));
- } else {
- val = msp_read_dem(client, 0xbb) & ~0x100;
- msp_write_dem(client, 0xbb, val | (tuner ? 0x100 : 0));
- }
msp_set_scart(client, sc_in, 0);
msp_set_scart(client, sc1_out, 1);
msp_set_scart(client, sc2_out, 2);
msp_set_audmode(client);
- msp_wake_thread(client);
+ reg = (state->opmode == OPMODE_AUTOSELECT) ? 0x30 : 0xbb;
+ val = msp_read_dem(client, reg);
+ if (tuner != ((val >> 8) & 1)) {
+ msp_write_dem(client, reg, (val & ~0x100) | (tuner << 8));
+ /* wake thread when a new tuner input is chosen */
+ msp_wake_thread(client);
+ }
break;
}
@@ -715,7 +703,7 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
msp_detect_stereo(client);
vt->audmode = state->audmode;
vt->rxsubchans = state->rxsubchans;
- vt->capability = V4L2_TUNER_CAP_STEREO |
+ vt->capability |= V4L2_TUNER_CAP_STEREO |
V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
break;
}
@@ -726,6 +714,8 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
if (state->radio) /* TODO: add mono/stereo support for radio */
break;
+ if (state->audmode == vt->audmode)
+ break;
state->audmode = vt->audmode;
/* only set audmode */
msp_set_audmode(client);
@@ -887,7 +877,7 @@ static int msp_attach(struct i2c_adapter *adapter, int address, int kind)
memset(state, 0, sizeof(*state));
state->v4l2_std = V4L2_STD_NTSC;
- state->audmode = V4L2_TUNER_MODE_LANG1;
+ state->audmode = V4L2_TUNER_MODE_STEREO;
state->volume = 58880; /* 0db gain */
state->balance = 32768; /* 0db gain */
state->bass = 32768;
@@ -931,13 +921,16 @@ static int msp_attach(struct i2c_adapter *adapter, int address, int kind)
state->has_radio = msp_revision >= 'G';
/* Has headphones output: not for stripped down products */
state->has_headphones = msp_prod_lo < 5;
+ /* Has scart2 input: not in stripped down products of the '3' family */
+ state->has_scart2 = msp_family >= 4 || msp_prod_lo < 7;
+ /* Has scart3 input: not in stripped down products of the '3' family */
+ state->has_scart3 = msp_family >= 4 || msp_prod_lo < 5;
/* Has scart4 input: not in pre D revisions, not in stripped D revs */
state->has_scart4 = msp_family >= 4 || (msp_revision >= 'D' && msp_prod_lo < 5);
- /* Has scart2 and scart3 inputs and scart2 output: not in stripped
- down products of the '3' family */
- state->has_scart23_in_scart2_out = msp_family >= 4 || msp_prod_lo < 5;
+ /* Has scart2 output: not in stripped down products of the '3' family */
+ state->has_scart2_out = msp_family >= 4 || msp_prod_lo < 5;
/* Has scart2 a volume control? Not in pre-D revisions. */
- state->has_scart2_out_volume = msp_revision > 'C' && state->has_scart23_in_scart2_out;
+ state->has_scart2_out_volume = msp_revision > 'C' && state->has_scart2_out;
/* Has a configurable i2s out? */
state->has_i2s_conf = msp_revision >= 'G' && msp_prod_lo < 7;
/* Has subwoofer output: not in pre-D revs and not in stripped down products */
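The mute handling above folds the new scan_in_progress flag into the volume calculation; a standalone sketch of that decision is shown below with illustrative names (the real code writes the result into DSP registers 0x0000/0x0007/0x0040).

    struct audio_state {
        int muted;               /* user-requested mute */
        int scan_in_progress;    /* carrier scan still running */
        int volume;              /* 0..65535 */
    };

    /* value for the DSP volume register: silent while muted or scanning */
    static int dsp_volume_word(const struct audio_state *s)
    {
        int really_muted = s->muted | s->scan_in_progress;

        return really_muted ? 0 : (s->volume * 0x7f / 65535) << 8;
    }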
diff --git a/drivers/media/video/msp3400-driver.h b/drivers/media/video/msp3400-driver.h
index 1940748bb6338..4e451049013de 100644
--- a/drivers/media/video/msp3400-driver.h
+++ b/drivers/media/video/msp3400-driver.h
@@ -54,8 +54,10 @@ struct msp_state {
u8 has_radio;
u8 has_headphones;
u8 has_ntsc_jp_d_k3;
+ u8 has_scart2;
+ u8 has_scart3;
u8 has_scart4;
- u8 has_scart23_in_scart2_out;
+ u8 has_scart2_out;
u8 has_scart2_out_volume;
u8 has_i2s_conf;
u8 has_subwoofer;
@@ -83,6 +85,7 @@ struct msp_state {
int volume, muted;
int balance, loudness;
int bass, treble;
+ int scan_in_progress;
/* thread */
struct task_struct *kthread;
@@ -98,7 +101,6 @@ int msp_read_dem(struct i2c_client *client, int addr);
int msp_read_dsp(struct i2c_client *client, int addr);
int msp_reset(struct i2c_client *client);
void msp_set_scart(struct i2c_client *client, int in, int out);
-void msp_set_mute(struct i2c_client *client);
void msp_set_audio(struct i2c_client *client);
int msp_sleep(struct msp_state *state, int timeout);
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
index c3984ea9ca076..633a102137893 100644
--- a/drivers/media/video/msp3400-kthreads.c
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -170,7 +170,7 @@ static void msp_set_source(struct i2c_client *client, u16 src)
msp_write_dsp(client, 0x000a, src);
msp_write_dsp(client, 0x000b, src);
msp_write_dsp(client, 0x000c, src);
- if (state->has_scart23_in_scart2_out)
+ if (state->has_scart2_out)
msp_write_dsp(client, 0x0041, src);
}
@@ -228,6 +228,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
char *modestr = (state->audmode >= 0 && state->audmode < 5) ?
strmode[state->audmode] : "unknown";
int src = 0; /* channel source: FM/AM, nicam or SCART */
+ int audmode = state->audmode;
if (state->opmode == OPMODE_AUTOSELECT) {
/* this method would break everything, let's make sure
@@ -239,11 +240,29 @@ static void msp3400c_set_audmode(struct i2c_client *client)
return;
}
+ /* Note: for the C and D revs no NTSC stereo + SAP is possible as
+ the hardware does not support SAP. So the rxsubchans combination
+ of STEREO | LANG2 does not occur. */
+
+ /* switch to mono if only mono is available */
+ if (state->rxsubchans == V4L2_TUNER_SUB_MONO)
+ audmode = V4L2_TUNER_MODE_MONO;
+ /* if bilingual */
+ else if (state->rxsubchans & V4L2_TUNER_SUB_LANG2) {
+ /* and mono or stereo, then fallback to lang1 */
+ if (audmode == V4L2_TUNER_MODE_MONO ||
+ audmode == V4L2_TUNER_MODE_STEREO)
+ audmode = V4L2_TUNER_MODE_LANG1;
+ }
+ /* if stereo, and audmode is not mono, then switch to stereo */
+ else if (audmode != V4L2_TUNER_MODE_MONO)
+ audmode = V4L2_TUNER_MODE_STEREO;
+
/* switch demodulator */
switch (state->mode) {
case MSP_MODE_FM_TERRA:
v4l_dbg(1, msp_debug, client, "FM set_audmode: %s\n", modestr);
- switch (state->audmode) {
+ switch (audmode) {
case V4L2_TUNER_MODE_STEREO:
msp_write_dsp(client, 0x000e, 0x3001);
break;
@@ -257,7 +276,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
break;
case MSP_MODE_FM_SAT:
v4l_dbg(1, msp_debug, client, "SAT set_audmode: %s\n", modestr);
- switch (state->audmode) {
+ switch (audmode) {
case V4L2_TUNER_MODE_MONO:
msp3400c_set_carrier(client, MSP_CARRIER(6.5), MSP_CARRIER(6.5));
break;
@@ -296,7 +315,8 @@ static void msp3400c_set_audmode(struct i2c_client *client)
}
/* switch audio */
- switch (state->audmode) {
+ v4l_dbg(1, msp_debug, client, "set audmode %d\n", audmode);
+ switch (audmode) {
case V4L2_TUNER_MODE_STEREO:
case V4L2_TUNER_MODE_LANG1_LANG2:
src |= 0x0020;
@@ -314,10 +334,6 @@ static void msp3400c_set_audmode(struct i2c_client *client)
src = 0x0030;
break;
case V4L2_TUNER_MODE_LANG1:
- /* switch to stereo for stereo transmission, otherwise
- keep first language */
- if (state->rxsubchans & V4L2_TUNER_SUB_STEREO)
- src |= 0x0020;
break;
case V4L2_TUNER_MODE_LANG2:
src |= 0x0010;
@@ -367,7 +383,7 @@ static int msp3400c_detect_stereo(struct i2c_client *client)
if (val > 32767)
val -= 65536;
v4l_dbg(2, msp_debug, client, "stereo detect register: %d\n", val);
- if (val > 4096) {
+ if (val > 8192) {
rxsubchans = V4L2_TUNER_SUB_STEREO;
} else if (val < -4096) {
rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
@@ -464,19 +480,22 @@ int msp3400c_thread(void *data)
if (state->radio || MSP_MODE_EXTERN == state->mode) {
/* no carrier scan, just unmute */
v4l_dbg(1, msp_debug, client, "thread: no carrier scan\n");
+ state->scan_in_progress = 0;
msp_set_audio(client);
continue;
}
- /* mute */
- msp_set_mute(client);
+ /* mute audio */
+ state->scan_in_progress = 1;
+ msp_set_audio(client);
+
msp3400c_set_mode(client, MSP_MODE_AM_DETECT);
val1 = val2 = 0;
max1 = max2 = -1;
state->watch_stereo = 0;
state->nicam_on = 0;
- /* some time for the tuner to sync */
+ /* wait for tuner to settle down after a channel change */
if (msp_sleep(state, 200))
goto restart;
@@ -552,7 +571,6 @@ int msp3400c_thread(void *data)
/* B/G NICAM */
state->second = msp3400c_carrier_detect_55[max2].cdo;
msp3400c_set_mode(client, MSP_MODE_FM_NICAM1);
- msp3400c_set_carrier(client, state->second, state->main);
state->nicam_on = 1;
state->watch_stereo = 1;
} else {
@@ -563,7 +581,6 @@ int msp3400c_thread(void *data)
/* PAL I NICAM */
state->second = MSP_CARRIER(6.552);
msp3400c_set_mode(client, MSP_MODE_FM_NICAM2);
- msp3400c_set_carrier(client, state->second, state->main);
state->nicam_on = 1;
state->watch_stereo = 1;
break;
@@ -577,13 +594,11 @@ int msp3400c_thread(void *data)
/* L NICAM or AM-mono */
state->second = msp3400c_carrier_detect_65[max2].cdo;
msp3400c_set_mode(client, MSP_MODE_AM_NICAM);
- msp3400c_set_carrier(client, state->second, state->main);
state->watch_stereo = 1;
} else if (max2 == 0 && state->has_nicam) {
/* D/K NICAM */
state->second = msp3400c_carrier_detect_65[max2].cdo;
msp3400c_set_mode(client, MSP_MODE_FM_NICAM1);
- msp3400c_set_carrier(client, state->second, state->main);
state->nicam_on = 1;
state->watch_stereo = 1;
} else {
@@ -595,25 +610,25 @@ int msp3400c_thread(void *data)
no_second:
state->second = msp3400c_carrier_detect_main[max1].cdo;
msp3400c_set_mode(client, MSP_MODE_FM_TERRA);
- msp3400c_set_carrier(client, state->second, state->main);
- state->rxsubchans = V4L2_TUNER_SUB_MONO;
break;
}
+ msp3400c_set_carrier(client, state->second, state->main);
/* unmute */
- msp_set_audio(client);
+ state->scan_in_progress = 0;
msp3400c_set_audmode(client);
+ msp_set_audio(client);
if (msp_debug)
msp3400c_print_mode(client);
/* monitor tv audio mode, the first time don't wait
so long to get a quick stereo/bilingual result */
- if (msp_sleep(state, 1000))
- goto restart;
+ count = 3;
while (state->watch_stereo) {
- if (msp_sleep(state, 5000))
+ if (msp_sleep(state, count ? 1000 : 5000))
goto restart;
+ if (count) count--;
watch_stereo(client);
}
}
@@ -626,7 +641,7 @@ int msp3410d_thread(void *data)
{
struct i2c_client *client = data;
struct msp_state *state = i2c_get_clientdata(client);
- int val, i, std;
+ int val, i, std, count;
v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
@@ -644,16 +659,14 @@ int msp3410d_thread(void *data)
if (state->mode == MSP_MODE_EXTERN) {
/* no carrier scan needed, just unmute */
v4l_dbg(1, msp_debug, client, "thread: no carrier scan\n");
+ state->scan_in_progress = 0;
msp_set_audio(client);
continue;
}
- /* put into sane state (and mute) */
- msp_reset(client);
-
- /* some time for the tuner to sync */
- if (msp_sleep(state,200))
- goto restart;
+ /* mute audio */
+ state->scan_in_progress = 1;
+ msp_set_audio(client);
/* start autodetect. Note: autodetect is not supported for
NTSC-M and radio, hence we force the standard in those cases. */
@@ -664,6 +677,10 @@ int msp3410d_thread(void *data)
state->watch_stereo = 0;
state->nicam_on = 0;
+ /* wait for tuner to settle down after a channel change */
+ if (msp_sleep(state, 200))
+ goto restart;
+
if (msp_debug)
v4l_dbg(2, msp_debug, client, "setting standard: %s (0x%04x)\n",
msp_standard_std_name(std), std);
@@ -693,6 +710,7 @@ int msp3410d_thread(void *data)
state->main = msp_stdlist[i].main;
state->second = msp_stdlist[i].second;
state->std = val;
+ state->rxsubchans = V4L2_TUNER_SUB_MONO;
if (msp_amsound && !state->radio && (state->v4l2_std & V4L2_STD_SECAM) &&
(val != 0x0009)) {
@@ -714,20 +732,17 @@ int msp3410d_thread(void *data)
else
state->mode = MSP_MODE_FM_NICAM1;
/* just turn on stereo */
- state->rxsubchans = V4L2_TUNER_SUB_STEREO;
state->nicam_on = 1;
state->watch_stereo = 1;
break;
case 0x0009:
state->mode = MSP_MODE_AM_NICAM;
- state->rxsubchans = V4L2_TUNER_SUB_MONO;
state->nicam_on = 1;
state->watch_stereo = 1;
break;
case 0x0020: /* BTSC */
/* The pre-'G' models only have BTSC-mono */
state->mode = MSP_MODE_BTSC;
- state->rxsubchans = V4L2_TUNER_SUB_MONO;
break;
case 0x0040: /* FM radio */
state->mode = MSP_MODE_FM_RADIO;
@@ -737,15 +752,12 @@ int msp3410d_thread(void *data)
msp3400c_set_mode(client, MSP_MODE_FM_RADIO);
msp3400c_set_carrier(client, MSP_CARRIER(10.7),
MSP_CARRIER(10.7));
- /* scart routing (this doesn't belong here I think) */
- msp_set_scart(client,SCART_IN2,0);
break;
case 0x0002:
case 0x0003:
case 0x0004:
case 0x0005:
state->mode = MSP_MODE_FM_TERRA;
- state->rxsubchans = V4L2_TUNER_SUB_MONO;
state->watch_stereo = 1;
break;
}
@@ -759,20 +771,19 @@ int msp3410d_thread(void *data)
if (state->has_i2s_conf)
msp_write_dem(client, 0x40, state->i2s_mode);
- /* unmute, restore misc registers */
- msp_set_audio(client);
-
- msp_write_dsp(client, 0x13, state->acb);
+ /* unmute */
msp3400c_set_audmode(client);
+ state->scan_in_progress = 0;
+ msp_set_audio(client);
/* monitor tv audio mode, the first time don't wait
so long to get a quick stereo/bilingual result */
- if (msp_sleep(state, 1000))
- goto restart;
+ count = 3;
while (state->watch_stereo) {
- watch_stereo(client);
- if (msp_sleep(state, 5000))
+ if (msp_sleep(state, count ? 1000 : 5000))
goto restart;
+ if (count) count--;
+ watch_stereo(client);
}
}
v4l_dbg(1, msp_debug, client, "thread: exit\n");
@@ -829,27 +840,27 @@ static void msp34xxg_set_source(struct i2c_client *client, u16 reg, int in)
source = 0; /* mono only */
matrix = 0x30;
break;
- case V4L2_TUNER_MODE_LANG1:
- source = 3; /* stereo or A */
- matrix = 0x00;
- break;
case V4L2_TUNER_MODE_LANG2:
source = 4; /* stereo or B */
matrix = 0x10;
break;
- case V4L2_TUNER_MODE_STEREO:
case V4L2_TUNER_MODE_LANG1_LANG2:
- default:
source = 1; /* stereo or A|B */
matrix = 0x20;
break;
+ case V4L2_TUNER_MODE_STEREO:
+ case V4L2_TUNER_MODE_LANG1:
+ default:
+ source = 3; /* stereo or A */
+ matrix = 0x00;
+ break;
}
- if (in == MSP_DSP_OUT_TUNER)
+ if (in == MSP_DSP_IN_TUNER)
source = (source << 8) | 0x20;
/* the msp34x2g puts the MAIN_AVC, MAIN and AUX sources in 12, 13, 14
instead of 11, 12, 13. So we add one for that msp version. */
- else if (in >= MSP_DSP_OUT_MAIN_AVC && state->has_dolby_pro_logic)
+ else if (in >= MSP_DSP_IN_MAIN_AVC && state->has_dolby_pro_logic)
source = ((in + 1) << 8) | matrix;
else
source = (in << 8) | matrix;
@@ -869,7 +880,7 @@ static void msp34xxg_set_sources(struct i2c_client *client)
msp34xxg_set_source(client, 0x000c, (in >> 4) & 0xf);
msp34xxg_set_source(client, 0x0009, (in >> 8) & 0xf);
msp34xxg_set_source(client, 0x000a, (in >> 12) & 0xf);
- if (state->has_scart23_in_scart2_out)
+ if (state->has_scart2_out)
msp34xxg_set_source(client, 0x0041, (in >> 16) & 0xf);
msp34xxg_set_source(client, 0x000b, (in >> 20) & 0xf);
}
@@ -887,10 +898,6 @@ static void msp34xxg_reset(struct i2c_client *client)
msp_reset(client);
- /* make sure that input/output is muted (paranoid mode) */
- /* ACB, mute DSP input, mute SCART 1 */
- msp_write_dsp(client, 0x13, 0x0f20);
-
if (state->has_i2s_conf)
msp_write_dem(client, 0x40, state->i2s_mode);
@@ -1028,7 +1035,7 @@ static void msp34xxg_set_audmode(struct i2c_client *client)
if (state->std == 0x20) {
if ((state->rxsubchans & V4L2_TUNER_SUB_SAP) &&
- (state->audmode == V4L2_TUNER_MODE_STEREO ||
+ (state->audmode == V4L2_TUNER_MODE_LANG1_LANG2 ||
state->audmode == V4L2_TUNER_MODE_LANG2)) {
msp_write_dem(client, 0x20, 0x21);
} else {
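The audmode fallback added to msp3400c_set_audmode() can be read as one pure function over the detected subchannels and the requested mode; here is a sketch with renamed constants (the driver itself uses the V4L2_TUNER_* values).

    enum { SUB_MONO = 0x1, SUB_STEREO = 0x2, SUB_LANG2 = 0x8 };
    enum { MODE_MONO, MODE_STEREO, MODE_LANG1, MODE_LANG2 };

    static int effective_audmode(int rxsubchans, int requested)
    {
        if (rxsubchans == SUB_MONO)
            return MODE_MONO;            /* only mono is transmitted */
        if (rxsubchans & SUB_LANG2)      /* bilingual transmission */
            return (requested == MODE_MONO || requested == MODE_STEREO)
                ? MODE_LANG1 : requested;
        /* plain stereo: anything except an explicit mono request plays stereo */
        return (requested == MODE_MONO) ? MODE_MONO : MODE_STEREO;
    }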
diff --git a/drivers/media/video/pwc/Kconfig b/drivers/media/video/pwc/Kconfig
new file mode 100644
index 0000000000000..86376556f1083
--- /dev/null
+++ b/drivers/media/video/pwc/Kconfig
@@ -0,0 +1,28 @@
+config USB_PWC
+ tristate "USB Philips Cameras"
+ depends on USB && VIDEO_DEV
+ ---help---
+ Say Y or M here if you want to use one of these Philips & OEM
+ webcams:
+ * Philips PCA645, PCA646
+ * Philips PCVC675, PCVC680, PCVC690
+ * Philips PCVC720/40, PCVC730, PCVC740, PCVC750
+ * Askey VC010
+ * Logitech QuickCam Pro 3000, 4000, 'Zoom', 'Notebook Pro'
+ and 'Orbit'/'Sphere'
+ * Samsung MPC-C10, MPC-C30
+ * Creative Webcam 5, Pro Ex
+ * SOTEC Afina Eye
+ * Visionite VCS-UC300, VCS-UM100
+
+ The PCA635, PCVC665 and PCVC720/20 are not supported by this driver
+ and never will be, but the 665 and 720/20 are supported by other
+ drivers.
+
+ See <file:Documentation/usb/philips.txt> for more information and
+ installation instructions.
+
+ The built-in microphone is enabled by selecting USB Audio support.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pwc.
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index b05015282601d..dceebc0b1250a 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -40,6 +40,7 @@
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
+#include <media/saa7115.h>
#include <asm/div64.h>
MODULE_DESCRIPTION("Philips SAA7113/SAA7114/SAA7115 video decoder driver");
@@ -53,7 +54,7 @@ module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
static unsigned short normal_i2c[] = {
- 0x4a >>1, 0x48 >>1, /* SAA7113 */
+ 0x4a >> 1, 0x48 >> 1, /* SAA7113 */
0x42 >> 1, 0x40 >> 1, /* SAA7114 and SAA7115 */
I2C_CLIENT_END };
@@ -722,16 +723,16 @@ static void saa7115_set_v4lstd(struct i2c_client *client, v4l2_std_id std)
100 reserved NTSC-Japan (3.58MHz)
*/
if (state->ident == V4L2_IDENT_SAA7113) {
- u8 reg = saa7115_read(client, 0x0e) & 0x8f;
+ u8 reg = saa7115_read(client, 0x0e) & 0x8f;
if (std == V4L2_STD_PAL_M) {
- reg|=0x30;
+ reg |= 0x30;
} else if (std == V4L2_STD_PAL_N) {
- reg|=0x20;
+ reg |= 0x20;
} else if (std == V4L2_STD_PAL_60) {
- reg|=0x10;
+ reg |= 0x10;
} else if (std == V4L2_STD_NTSC_M_JP) {
- reg|=0x40;
+ reg |= 0x40;
}
saa7115_write(client, 0x0e, reg);
}
@@ -811,7 +812,7 @@ static void saa7115_set_lcr(struct i2c_client *client, struct v4l2_sliced_vbi_fo
u8 lcr[24];
int i, x;
- /* saa7113/71144 doesn't yet support VBI */
+ /* saa7113/7114 doesn't yet support VBI */
if (state->ident != V4L2_IDENT_SAA7115)
return;
@@ -851,7 +852,7 @@ static void saa7115_set_lcr(struct i2c_client *client, struct v4l2_sliced_vbi_fo
case 0:
lcr[i] |= 0xf << (4 * x);
break;
- case V4L2_SLICED_TELETEXT_PAL_B:
+ case V4L2_SLICED_TELETEXT_B:
lcr[i] |= 1 << (4 * x);
break;
case V4L2_SLICED_CAPTION_525:
@@ -880,7 +881,7 @@ static void saa7115_set_lcr(struct i2c_client *client, struct v4l2_sliced_vbi_fo
static int saa7115_get_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt)
{
static u16 lcr2vbi[] = {
- 0, V4L2_SLICED_TELETEXT_PAL_B, 0, /* 1 */
+ 0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
0, V4L2_SLICED_CAPTION_525, /* 4 */
V4L2_SLICED_WSS_625, 0, /* 5 */
V4L2_SLICED_VPS, 0, 0, 0, 0, /* 7 */
@@ -1045,7 +1046,7 @@ static void saa7115_decode_vbi_line(struct i2c_client *client,
/* decode payloads */
switch (id2) {
case 1:
- vbi->type = V4L2_SLICED_TELETEXT_PAL_B;
+ vbi->type = V4L2_SLICED_TELETEXT_B;
break;
case 4:
if (!saa7115_odd_parity(p[0]) || !saa7115_odd_parity(p[1]))
@@ -1180,6 +1181,46 @@ static int saa7115_command(struct i2c_client *client, unsigned int cmd, void *ar
state->radio = 1;
break;
+ case VIDIOC_INT_G_VIDEO_ROUTING:
+ {
+ struct v4l2_routing *route = arg;
+
+ route->input = state->input;
+ route->output = 0;
+ break;
+ }
+
+ case VIDIOC_INT_S_VIDEO_ROUTING:
+ {
+ struct v4l2_routing *route = arg;
+
+ v4l_dbg(1, debug, client, "decoder set input %d\n", route->input);
+ /* saa7113 does not have these inputs */
+ if (state->ident == V4L2_IDENT_SAA7113 &&
+ (route->input == SAA7115_COMPOSITE4 ||
+ route->input == SAA7115_COMPOSITE5)) {
+ return -EINVAL;
+ }
+ if (route->input > SAA7115_SVIDEO3)
+ return -EINVAL;
+ if (state->input == route->input)
+ break;
+ v4l_dbg(1, debug, client, "now setting %s input\n",
+ (route->input >= SAA7115_SVIDEO0) ? "S-Video" : "Composite");
+ state->input = route->input;
+
+ /* select mode */
+ saa7115_write(client, 0x02,
+ (saa7115_read(client, 0x02) & 0xf0) |
+ state->input);
+
+ /* bypass chrominance trap for S-Video modes */
+ saa7115_write(client, 0x09,
+ (saa7115_read(client, 0x09) & 0x7f) |
+ (state->input >= SAA7115_SVIDEO0 ? 0x80 : 0x0));
+ break;
+ }
+
case VIDIOC_G_INPUT:
*(int *)arg = state->input;
break;
@@ -1321,7 +1362,7 @@ static int saa7115_attach(struct i2c_adapter *adapter, int address, int kind)
saa7115_write(client, 0, 5);
chip_id = saa7115_read(client, 0) & 0x0f;
- if (chip_id <3 && chip_id > 5) {
+ if (chip_id < 3 && chip_id > 5) {
v4l_dbg(1, debug, client, "saa7115 not found\n");
kfree(client);
return 0;
@@ -1360,7 +1401,7 @@ static int saa7115_attach(struct i2c_adapter *adapter, int address, int kind)
v4l_dbg(1, debug, client, "writing init values\n");
/* init to 60hz/48khz */
- if (state->ident==V4L2_IDENT_SAA7113)
+ if (state->ident == V4L2_IDENT_SAA7113)
saa7115_writeregs(client, saa7113_init_auto_input);
else
saa7115_writeregs(client, saa7115_init_auto_input);
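The routing handler added above boils down to two register updates; a standalone sketch of that arithmetic follows, where SVIDEO_FIRST is an assumed stand-in for SAA7115_SVIDEO0.

    enum { SVIDEO_FIRST = 6 };   /* assumed index of the first S-Video input */

    /* low nibble of register 0x02 selects the analog input mux */
    static unsigned char mux_reg02(unsigned char old, unsigned int input)
    {
        return (old & 0xf0) | (input & 0x0f);
    }

    /* bit 7 of register 0x09 bypasses the chrominance trap for S-Video */
    static unsigned char chroma_reg09(unsigned char old, unsigned int input)
    {
        return (old & 0x7f) | (input >= SVIDEO_FIRST ? 0x80 : 0x00);
    }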
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index 992c71774f303..133f9e5252fec 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -54,6 +54,7 @@
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
+#include <media/saa7127.h>
static int debug = 0;
static int test_image = 0;
@@ -222,22 +223,6 @@ static struct i2c_reg_value saa7127_init_config_50hz[] = {
{ 0, 0 }
};
-/* Enumeration for the Supported input types */
-enum saa7127_input_type {
- SAA7127_INPUT_TYPE_NORMAL,
- SAA7127_INPUT_TYPE_TEST_IMAGE
-};
-
-/* Enumeration for the Supported Output signal types */
-enum saa7127_output_type {
- SAA7127_OUTPUT_TYPE_BOTH,
- SAA7127_OUTPUT_TYPE_COMPOSITE,
- SAA7127_OUTPUT_TYPE_SVIDEO,
- SAA7127_OUTPUT_TYPE_RGB,
- SAA7127_OUTPUT_TYPE_YUV_C,
- SAA7127_OUTPUT_TYPE_YUV_V
-};
-
/*
**********************************************************************
*
@@ -561,7 +546,7 @@ static int saa7127_command(struct i2c_client *client,
{
struct saa7127_state *state = i2c_get_clientdata(client);
struct v4l2_format *fmt = arg;
- int *iarg = arg;
+ struct v4l2_routing *route = arg;
switch (cmd) {
case VIDIOC_S_STD:
@@ -573,15 +558,23 @@ static int saa7127_command(struct i2c_client *client,
*(v4l2_std_id *)arg = state->std;
break;
- case VIDIOC_S_INPUT:
- if (state->input_type == *iarg)
- break;
- return saa7127_set_input_type(client, *iarg);
+ case VIDIOC_INT_G_VIDEO_ROUTING:
+ route->input = state->input_type;
+ route->output = state->output_type;
+ break;
- case VIDIOC_S_OUTPUT:
- if (state->output_type == *iarg)
- break;
- return saa7127_set_output_type(client, *iarg);
+ case VIDIOC_INT_S_VIDEO_ROUTING:
+ {
+ int rc = 0;
+
+ if (state->input_type != route->input) {
+ rc = saa7127_set_input_type(client, route->input);
+ }
+ if (rc == 0 && state->output_type != route->output) {
+ rc = saa7127_set_output_type(client, route->output);
+ }
+ return rc;
+ }
case VIDIOC_STREAMON:
case VIDIOC_STREAMOFF:
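On the caller side, the removed VIDIOC_S_INPUT/VIDIOC_S_OUTPUT pair is replaced by one routing call carrying both values. A sketch of what a bridge driver would pass is shown below; the enum values follow the order of the definitions moved into media/saa7127.h, and the helper itself is illustrative.

    struct routing { unsigned int input, output; };

    enum { INPUT_NORMAL, INPUT_TEST_IMAGE };
    enum { OUTPUT_BOTH, OUTPUT_COMPOSITE, OUTPUT_SVIDEO };

    /* select normal video on the S-Video output in a single step */
    static void select_svideo(void (*issue)(unsigned int cmd, void *arg),
                              unsigned int routing_cmd)
    {
        struct routing route = {
            .input  = INPUT_NORMAL,
            .output = OUTPUT_SVIDEO,
        };

        issue(routing_cmd, &route);
    }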
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 86671a43e7698..e1c1805df1fb6 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -39,6 +39,7 @@ config VIDEO_SAA7134_DVB
tristate "DVB/ATSC Support for saa7134 based TV cards"
depends on VIDEO_SAA7134 && DVB_CORE
select VIDEO_BUF_DVB
+ select FW_LOADER
---help---
This adds support for DVB cards based on the
Philips saa7134 chip.
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index fdd7f48f3b76f..e666a4465ca46 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -208,7 +208,7 @@ struct saa7134_board saa7134_boards[] = {
[SAA7134_BOARD_FLYTVPLATINUM_FM] = {
/* LifeView FlyTV Platinum FM (LR214WF) */
/* "Peter Missel <peter.missel@onlinehome.de> */
- .name = "LifeView FlyTV Platinum FM",
+ .name = "LifeView FlyTV Platinum FM / Gold",
.audio_clock = 0x00200000,
.tuner_type = TUNER_PHILIPS_TDA8290,
.radio_type = UNSET,
@@ -2660,7 +2660,7 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
.name = name_comp1,
- .vmux = 0,
+ .vmux = 1,
.amux = LINE1,
},{
.name = name_svideo,
@@ -2671,7 +2671,7 @@ struct saa7134_board saa7134_boards[] = {
[SAA7134_BOARD_FLYDVBT_LR301] = {
/* LifeView FlyDVB-T */
/* Giampiero Giancipoli <gianci@libero.it> */
- .name = "LifeView FlyDVB-T",
+ .name = "LifeView FlyDVB-T / Genius VideoWonder DVB-T",
.audio_clock = 0x00200000,
.tuner_type = TUNER_ABSENT,
.radio_type = UNSET,
@@ -2808,6 +2808,40 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
+ [SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS] = {
+ .name = "LifeView FlyDVB-T Hybrid Cardbus",
+ .audio_clock = 0x00200000,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .mpeg = SAA7134_MPEG_DVB,
+ .gpiomask = 0x00600000, /* Bit 21 0=Radio, Bit 22 0=TV */
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 1,
+ .amux = TV,
+ .gpio = 0x200000, /* GPIO21=High for TV input */
+ .tv = 1,
+ },{
+ .name = name_svideo, /* S-Video signal on S-Video input */
+ .vmux = 8,
+ .amux = LINE2,
+ },{
+ .name = name_comp1, /* Composite signal on S-Video input */
+ .vmux = 0,
+ .amux = LINE2,
+ },{
+ .name = name_comp2, /* Composite input */
+ .vmux = 3,
+ .amux = LINE2,
+ }},
+ .radio = {
+ .name = name_radio,
+ .amux = TV,
+ .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */
+ },
+ },
};
const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -3333,6 +3367,30 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0x0005,
.driver_data = SAA7134_BOARD_MD7134_BRIDGE_2,
},{
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
+ .subvendor = 0x1489,
+ .subdevice = 0x0301,
+ .driver_data = SAA7134_BOARD_FLYDVBT_LR301,
+ },{
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5168, /* Animation Technologies (LifeView) */
+ .subdevice = 0x0304,
+ .driver_data = SAA7134_BOARD_FLYTVPLATINUM_FM,
+ },{
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5168,
+ .subdevice = 0x3306,
+ .driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS,
+ },{
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5168,
+ .subdevice = 0x3502, /* what's the difference from 0x3306? */
+ .driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS,
+ },{
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -3462,6 +3520,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
saa_writeb(SAA7134_GPIO_GPSTATUS3, 0x06);
break;
case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
+ case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
saa_writeb(SAA7134_GPIO_GPMODE3, 0x08);
saa_writeb(SAA7134_GPIO_GPSTATUS3, 0x00);
break;
@@ -3633,6 +3692,7 @@ int saa7134_board_init2(struct saa7134_dev *dev)
}
break;
case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
+ case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
/* make the tda10046 find its eeprom */
{
u8 data[] = { 0x3c, 0x33, 0x62};
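The new PCI table entries work through the usual subsystem-ID lookup: the PCI core matches subvendor/subdevice and hands back driver_data, which the driver uses as an index into saa7134_boards[]. A standalone model of that lookup (illustrative helper, not a driver function):

    struct id_entry {
        unsigned short subvendor, subdevice;
        unsigned long board;   /* e.g. SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS */
    };

    static long board_for(const struct id_entry *tbl, int n,
                          unsigned short subvendor, unsigned short subdevice)
    {
        int i;

        for (i = 0; i < n; i++)
            if (tbl[i].subvendor == subvendor &&
                tbl[i].subdevice == subdevice)
                return tbl[i].board;
        return -1;   /* unknown card, fall back to the default board */
    }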
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 86cfdb8514cb5..222a36c389175 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -1064,6 +1064,10 @@ static int dvb_init(struct saa7134_dev *dev)
dev->dvb.frontend = tda10046_attach(&tevion_dvbt220rf_config,
&dev->i2c_adap);
break;
+ case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
+ dev->dvb.frontend = tda10046_attach(&ads_tech_duo_config,
+ &dev->i2c_adap);
+ break;
#endif
#ifdef HAVE_NXT200X
case SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180:
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 31ba293854c19..353af3a8b766b 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -220,6 +220,7 @@ struct saa7134_format {
#define SAA7134_BOARD_AVERMEDIA_A169_B 91
#define SAA7134_BOARD_AVERMEDIA_A169_B1 92
#define SAA7134_BOARD_MD7134_BRIDGE_2 93
+#define SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS 94
#define SAA7134_MAXBOARDS 8
#define SAA7134_INPUT_MAX 8
diff --git a/drivers/media/video/sn9c102/Kconfig b/drivers/media/video/sn9c102/Kconfig
new file mode 100644
index 0000000000000..55f2bc11964ba
--- /dev/null
+++ b/drivers/media/video/sn9c102/Kconfig
@@ -0,0 +1,11 @@
+config USB_SN9C102
+ tristate "USB SN9C10x PC Camera Controller support"
+ depends on USB && VIDEO_DEV
+ ---help---
+ Say Y here if you want support for cameras based on SONiX SN9C101,
+ SN9C102 or SN9C103 PC Camera Controllers.
+
+ See <file:Documentation/video4linux/sn9c102.txt> for more info.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sn9c102.
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index df195c905366c..1013b4de89a27 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -401,7 +401,7 @@ static void tuner_status(struct i2c_client *client)
}
tuner_info("Tuner mode: %s\n", p);
tuner_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
- tuner_info("Standard: 0x%08llx\n", t->std);
+ tuner_info("Standard: 0x%08lx\n", (unsigned long)t->std);
if (t->mode != V4L2_TUNER_RADIO)
return;
if (t->has_signal) {
@@ -558,10 +558,10 @@ static inline int set_mode(struct i2c_client *client, struct tuner *t, int mode,
static inline int check_v4l2(struct tuner *t)
{
- if (t->using_v4l2) {
- tuner_dbg ("ignore v4l1 call\n");
- return EINVAL;
- }
+ /* bttv still uses both v4l1 and v4l2 calls to the tuner (v4l2 for
+ TV, v4l1 for radio); until that is fixed, this code is disabled.
+ Otherwise the radio (v4l1) wouldn't tune after using the TV (v4l2)
+ first. */
return 0;
}
@@ -744,6 +744,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
switch_v4l2();
tuner->type = t->mode;
+ if (t->mode == V4L2_TUNER_ANALOG_TV)
+ tuner->capability |= V4L2_TUNER_CAP_NORM;
if (t->mode != V4L2_TUNER_RADIO) {
tuner->rangelow = tv_range[0] * 16;
tuner->rangehigh = tv_range[1] * 16;
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index 356bff455ad18..c2b756107548f 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -1706,21 +1706,6 @@ static int chip_command(struct i2c_client *client,
break;
}
- case VIDIOC_S_AUDIO:
- {
- struct v4l2_audio *sarg = arg;
-
- if (!(desc->flags & CHIP_HAS_INPUTSEL) || sarg->index >= 4)
- return -EINVAL;
- /* There are four inputs: tuner, radio, extern and intern. */
- chip->input = sarg->index;
- if (chip->muted)
- break;
- chip_write_masked(chip, desc->inputreg,
- desc->inputmap[chip->input], desc->inputmask);
- break;
- }
-
case VIDIOC_S_TUNER:
{
struct v4l2_tuner *vt = arg;
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index e0d2ff83fc917..431c3e2f6c427 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -757,9 +757,9 @@ tveeprom_detect_client(struct i2c_adapter *adapter,
static int
tveeprom_attach_adapter (struct i2c_adapter *adapter)
{
- if (adapter->id != I2C_HW_B_BT848)
- return 0;
- return i2c_probe(adapter, &addr_data, tveeprom_detect_client);
+ if (adapter->class & I2C_CLASS_TV_ANALOG)
+ return i2c_probe(adapter, &addr_data, tveeprom_detect_client);
+ return 0;
}
static int
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 69d0fe159f4dc..dab4973bcf826 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -53,7 +53,7 @@ static struct v4l2_queryctrl tvp5150_qctrl[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
- .default_value = 0,
+ .default_value = 128,
.flags = 0,
}, {
.id = V4L2_CID_CONTRAST,
@@ -62,7 +62,7 @@ static struct v4l2_queryctrl tvp5150_qctrl[] = {
.minimum = 0,
.maximum = 255,
.step = 0x1,
- .default_value = 0x10,
+ .default_value = 128,
.flags = 0,
}, {
.id = V4L2_CID_SATURATION,
@@ -71,7 +71,7 @@ static struct v4l2_queryctrl tvp5150_qctrl[] = {
.minimum = 0,
.maximum = 255,
.step = 0x1,
- .default_value = 0x10,
+ .default_value = 128,
.flags = 0,
}, {
.id = V4L2_CID_HUE,
@@ -80,7 +80,7 @@ static struct v4l2_queryctrl tvp5150_qctrl[] = {
.minimum = -128,
.maximum = 127,
.step = 0x1,
- .default_value = 0x10,
+ .default_value = 0,
.flags = 0,
}
};
@@ -500,16 +500,21 @@ struct i2c_vbi_ram_value {
static struct i2c_vbi_ram_value vbi_ram_default[] =
{
+ /* FIXME: Current api doesn't handle all VBI types, those not
+ yet supported are placed under #if 0 */
+#if 0
{0x010, /* Teletext, SECAM, WST System A */
{V4L2_SLICED_TELETEXT_SECAM,6,23,1},
{ 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26,
0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 }
},
+#endif
{0x030, /* Teletext, PAL, WST System B */
- {V4L2_SLICED_TELETEXT_PAL_B,6,22,1},
+ {V4L2_SLICED_TELETEXT_B,6,22,1},
{ 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b,
0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 }
},
+#if 0
{0x050, /* Teletext, PAL, WST System C */
{V4L2_SLICED_TELETEXT_PAL_C,6,22,1},
{ 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22,
@@ -535,6 +540,7 @@ static struct i2c_vbi_ram_value vbi_ram_default[] =
{ 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,
0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 }
},
+#endif
{0x0f0, /* Closed Caption, NTSC */
{V4L2_SLICED_CAPTION_525,21,21,1},
{ 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,
@@ -545,6 +551,7 @@ static struct i2c_vbi_ram_value vbi_ram_default[] =
{ 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42,
0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 }
},
+#if 0
{0x130, /* Wide Screen Signal, NTSC C */
{V4L2_SLICED_WSS_525,20,20,1},
{ 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43,
@@ -560,6 +567,7 @@ static struct i2c_vbi_ram_value vbi_ram_default[] =
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49,
0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 }
},
+#endif
{0x190, /* Video Program System (VPS), PAL */
{V4L2_SLICED_VPS,16,16,0},
{ 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d,
@@ -850,7 +858,6 @@ static int tvp5150_command(struct i2c_client *c,
case 0:
case VIDIOC_INT_RESET:
- case DECODER_INIT:
tvp5150_reset(c);
break;
case VIDIOC_S_STD:
@@ -949,99 +956,15 @@ static int tvp5150_command(struct i2c_client *c,
#endif
case VIDIOC_LOG_STATUS:
- case DECODER_DUMP:
dump_reg(c);
break;
- case DECODER_GET_CAPABILITIES:
+ case VIDIOC_G_TUNER:
{
- struct video_decoder_capability *cap = arg;
-
- cap->flags = VIDEO_DECODER_PAL |
- VIDEO_DECODER_NTSC |
- VIDEO_DECODER_SECAM |
- VIDEO_DECODER_AUTO | VIDEO_DECODER_CCIR;
- cap->inputs = 3;
- cap->outputs = 1;
- break;
- }
- case DECODER_GET_STATUS:
- {
- int *iarg = arg;
- int status;
- int res=0;
- status = tvp5150_read(c, 0x88);
- if(status&0x08){
- res |= DECODER_STATUS_COLOR;
- }
- if(status&0x04 && status&0x02){
- res |= DECODER_STATUS_GOOD;
- }
- *iarg=res;
- break;
- }
-
- case DECODER_SET_GPIO:
- break;
-
- case DECODER_SET_VBI_BYPASS:
- break;
-
- case DECODER_SET_NORM:
- {
- int *iarg = arg;
-
- switch (*iarg) {
-
- case VIDEO_MODE_NTSC:
- break;
-
- case VIDEO_MODE_PAL:
- break;
-
- case VIDEO_MODE_SECAM:
- break;
-
- case VIDEO_MODE_AUTO:
- break;
-
- default:
- return -EINVAL;
-
- }
- decoder->norm = *iarg;
- break;
- }
- case DECODER_SET_INPUT:
- {
- int *iarg = arg;
- if (*iarg < 0 || *iarg > 3) {
- return -EINVAL;
- }
-
- decoder->input = *iarg;
- tvp5150_selmux(c, decoder->input);
-
- break;
- }
- case DECODER_SET_OUTPUT:
- {
- int *iarg = arg;
-
- /* not much choice of outputs */
- if (*iarg != 0) {
- return -EINVAL;
- }
- break;
- }
- case DECODER_ENABLE_OUTPUT:
- {
- int *iarg = arg;
-
- decoder->enable = (*iarg != 0);
-
- tvp5150_selmux(c, decoder->input);
+ struct v4l2_tuner *vt = arg;
+ int status = tvp5150_read(c, 0x88);
+ vt->signal = ((status & 0x04) && (status & 0x02)) ? 0xffff : 0x0;
break;
}
case VIDIOC_QUERYCTRL:
@@ -1087,35 +1010,6 @@ static int tvp5150_command(struct i2c_client *c,
return -EINVAL;
}
- case DECODER_SET_PICTURE:
- {
- struct video_picture *pic = arg;
- if (decoder->bright != pic->brightness) {
- /* We want 0 to 255 we get 0-65535 */
- decoder->bright = pic->brightness;
- tvp5150_write(c, TVP5150_BRIGHT_CTL,
- decoder->bright >> 8);
- }
- if (decoder->contrast != pic->contrast) {
- /* We want 0 to 255 we get 0-65535 */
- decoder->contrast = pic->contrast;
- tvp5150_write(c, TVP5150_CONTRAST_CTL,
- decoder->contrast >> 8);
- }
- if (decoder->sat != pic->colour) {
- /* We want 0 to 255 we get 0-65535 */
- decoder->sat = pic->colour;
- tvp5150_write(c, TVP5150_SATURATION_CTL,
- decoder->contrast >> 8);
- }
- if (decoder->hue != pic->hue) {
- /* We want -128 to 127 we get 0-65535 */
- decoder->hue = pic->hue;
- tvp5150_write(c, TVP5150_HUE_CTL,
- (decoder->hue - 32768) >> 8);
- }
- break;
- }
default:
return -EINVAL;
}
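With DECODER_GET_STATUS gone, signal presence is now reported through VIDIOC_G_TUNER by testing two lock bits of status register 0x88. Isolated as a helper (illustrative name, bit usage as in the driver above):

    /* both lock bits set -> full-scale signal, otherwise none */
    static unsigned int tuner_signal_from_status(unsigned char status)
    {
        return ((status & 0x04) && (status & 0x02)) ? 0xffff : 0x0000;
    }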
diff --git a/drivers/media/video/upd64031a.c b/drivers/media/video/upd64031a.c
new file mode 100644
index 0000000000000..fc52201d607ea
--- /dev/null
+++ b/drivers/media/video/upd64031a.c
@@ -0,0 +1,286 @@
+/*
+ * upd64031A - NEC Electronics Ghost Reduction for NTSC in Japan
+ *
+ * 2003 by T.Adachi <tadachi@tadachi-net.com>
+ * 2003 by Takeru KOMORIYA <komoriya@paken.org>
+ * 2006 by Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/upd64031a.h>
+
+// --------------------- register bit mask definitions -----------------------
+
+/* bit masks */
+#define GR_MODE_MASK 0xc0
+#define DIRECT_3DYCS_CONNECT_MASK 0xc0
+#define SYNC_CIRCUIT_MASK 0xa0
+
+// -----------------------------------------------------------------------------
+
+MODULE_DESCRIPTION("uPD64031A driver");
+MODULE_AUTHOR("T. Adachi, Takeru KOMORIYA, Hans Verkuil");
+MODULE_LICENSE("GPL");
+
+static int debug = 0;
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+static unsigned short normal_i2c[] = { 0x24 >> 1, 0x26 >> 1, I2C_CLIENT_END };
+
+
+I2C_CLIENT_INSMOD;
+
+enum {
+ R00 = 0, R01, R02, R03, R04,
+ R05, R06, R07, R08, R09,
+ R0A, R0B, R0C, R0D, R0E, R0F,
+ /* unused registers
+ R10, R11, R12, R13, R14,
+ R15, R16, R17,
+ */
+ TOT_REGS
+};
+
+struct upd64031a_state {
+ u8 regs[TOT_REGS];
+ u8 gr_mode;
+ u8 direct_3dycs_connect;
+ u8 ext_comp_sync;
+ u8 ext_vert_sync;
+};
+
+static u8 upd64031a_init[] = {
+ 0x00, 0xb8, 0x48, 0xd2, 0xe6,
+ 0x03, 0x10, 0x0b, 0xaf, 0x7f,
+ 0x00, 0x00, 0x1d, 0x5e, 0x00,
+ 0xd0
+};
+
+/* ------------------------------------------------------------------------ */
+
+static u8 upd64031a_read(struct i2c_client *client, u8 reg)
+{
+ u8 buf[2];
+
+ if (reg >= sizeof(buf))
+ return 0xff;
+ i2c_master_recv(client, buf, 2);
+ return buf[reg];
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void upd64031a_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ u8 buf[2];
+
+ buf[0] = reg;
+ buf[1] = val;
+ v4l_dbg(1, debug, client, "writing reg addr: %02X val: %02X\n", reg, val);
+ if (i2c_master_send(client, buf, 2) != 2)
+ v4l_err(client, "I/O error write 0x%02x/0x%02x\n", reg, val);
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* The input changed due to new input or channel changed */
+static void upd64031a_change(struct i2c_client *client)
+{
+ struct upd64031a_state *state = i2c_get_clientdata(client);
+ u8 reg = state->regs[R00];
+
+ v4l_dbg(1, debug, client, "changed input or channel\n");
+ upd64031a_write(client, R00, reg | 0x10);
+ upd64031a_write(client, R00, reg & ~0x10);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int upd64031a_command(struct i2c_client *client, unsigned int cmd, void *arg)
+{
+ struct upd64031a_state *state = i2c_get_clientdata(client);
+ struct v4l2_routing *route = arg;
+
+ switch (cmd) {
+ case VIDIOC_S_FREQUENCY:
+ upd64031a_change(client);
+ break;
+
+ case VIDIOC_INT_G_VIDEO_ROUTING:
+ route->input = (state->gr_mode >> 6) |
+ (state->direct_3dycs_connect >> 4) |
+ (state->ext_comp_sync >> 1) |
+ (state->ext_vert_sync >> 2);
+ route->output = 0;
+ break;
+
+ case VIDIOC_INT_S_VIDEO_ROUTING:
+ {
+ u8 r00, r05, r08;
+
+ state->gr_mode = (route->input & 3) << 6;
+ state->direct_3dycs_connect = (route->input & 0xc) << 4;
+ state->ext_comp_sync = (route->input & UPD64031A_COMPOSITE_EXTERNAL) << 1;
+ state->ext_vert_sync = (route->input & UPD64031A_VERTICAL_EXTERNAL) << 2;
+ r00 = (state->regs[R00] & ~GR_MODE_MASK) | state->gr_mode;
+ r05 = (state->regs[R00] & ~SYNC_CIRCUIT_MASK) |
+ state->ext_comp_sync | state->ext_vert_sync;
+ r08 = (state->regs[R08] & ~DIRECT_3DYCS_CONNECT_MASK) |
+ state->direct_3dycs_connect;
+ upd64031a_write(client, R00, r00);
+ upd64031a_write(client, R05, r05);
+ upd64031a_write(client, R08, r08);
+ upd64031a_change(client);
+ break;
+ }
+
+ case VIDIOC_LOG_STATUS:
+ v4l_info(client, "Status: SA00=0x%02x SA01=0x%02x\n",
+ upd64031a_read(client, 0), upd64031a_read(client, 1));
+ break;
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ case VIDIOC_INT_G_REGISTER:
+ {
+ struct v4l2_register *reg = arg;
+
+ if (reg->i2c_id != I2C_DRIVERID_UPD64031A)
+ return -EINVAL;
+ reg->val = upd64031a_read(client, reg->reg & 0xff);
+ break;
+ }
+
+ case VIDIOC_INT_S_REGISTER:
+ {
+ struct v4l2_register *reg = arg;
+ u8 addr = reg->reg & 0xff;
+ u8 val = reg->val & 0xff;
+
+ if (reg->i2c_id != I2C_DRIVERID_UPD64031A)
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ upd64031a_write(client, addr, val);
+ break;
+ }
+#endif
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* i2c implementation */
+
+static struct i2c_driver i2c_driver;
+
+static int upd64031a_attach(struct i2c_adapter *adapter, int address, int kind)
+{
+ struct i2c_client *client;
+ struct upd64031a_state *state;
+ int i;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return 0;
+
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ if (client == NULL) {
+ return -ENOMEM;
+ }
+
+ client->addr = address;
+ client->adapter = adapter;
+ client->driver = &i2c_driver;
+ snprintf(client->name, sizeof(client->name) - 1, "uPD64031A");
+
+ v4l_info(client, "chip found @ 0x%x (%s)\n", address << 1, adapter->name);
+
+ state = kmalloc(sizeof(struct upd64031a_state), GFP_KERNEL);
+ if (state == NULL) {
+ kfree(client);
+ return -ENOMEM;
+ }
+ i2c_set_clientdata(client, state);
+ memcpy(state->regs, upd64031a_init, sizeof(state->regs));
+ state->gr_mode = UPD64031A_GR_ON << 6;
+ state->direct_3dycs_connect = UPD64031A_3DYCS_COMPOSITE << 4;
+ state->ext_comp_sync = state->ext_vert_sync = 0;
+ for (i = 0; i < TOT_REGS; i++) {
+ upd64031a_write(client, i, state->regs[i]);
+ }
+
+ i2c_attach_client(client);
+
+ return 0;
+}
+
+static int upd64031a_probe(struct i2c_adapter *adapter)
+{
+ if (adapter->class & I2C_CLASS_TV_ANALOG)
+ return i2c_probe(adapter, &addr_data, upd64031a_attach);
+ return 0;
+}
+
+static int upd64031a_detach(struct i2c_client *client)
+{
+ int err;
+
+ err = i2c_detach_client(client);
+ if (err)
+ return err;
+
+ kfree(client);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* i2c implementation */
+static struct i2c_driver i2c_driver = {
+ .driver = {
+ .name = "upd64031a",
+ },
+ .id = I2C_DRIVERID_UPD64031A,
+ .attach_adapter = upd64031a_probe,
+ .detach_client = upd64031a_detach,
+ .command = upd64031a_command,
+};
+
+
+static int __init upd64031a_init_module(void)
+{
+ return i2c_add_driver(&i2c_driver);
+}
+
+static void __exit upd64031a_exit_module(void)
+{
+ i2c_del_driver(&i2c_driver);
+}
+
+module_init(upd64031a_init_module);
+module_exit(upd64031a_exit_module);
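The routing word used by the new uPD64031A driver packs several register fields into route->input; the two fields with explicit masks above split and recombine as sketched below (the external-sync flags are handled the same way with their header-defined masks).

    struct gr_fields {
        unsigned char gr_mode;                /* bits 7:6 of R00 */
        unsigned char direct_3dycs_connect;   /* bits 7:6 of R08 */
    };

    static struct gr_fields split_routing(unsigned int input)
    {
        struct gr_fields f = {
            .gr_mode = (input & 0x3) << 6,
            .direct_3dycs_connect = (input & 0xc) << 4,
        };
        return f;
    }

    static unsigned int join_routing(struct gr_fields f)
    {
        return (f.gr_mode >> 6) | (f.direct_3dycs_connect >> 4);
    }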
diff --git a/drivers/media/video/upd64083.c b/drivers/media/video/upd64083.c
new file mode 100644
index 0000000000000..c3a7ffe5c2674
--- /dev/null
+++ b/drivers/media/video/upd64083.c
@@ -0,0 +1,262 @@
+/*
+ * upd6408x - NEC Electronics 3-Dimensional Y/C separation driver
+ *
+ * 2003 by T.Adachi (tadachi@tadachi-net.com)
+ * 2003 by Takeru KOMORIYA <komoriya@paken.org>
+ * 2006 by Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/upd64083.h>
+
+MODULE_DESCRIPTION("uPD64083 driver");
+MODULE_AUTHOR("T. Adachi, Takeru KOMORIYA, Hans Verkuil");
+MODULE_LICENSE("GPL");
+
+static int debug = 0;
+module_param(debug, bool, 0644);
+
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+static unsigned short normal_i2c[] = { 0xb8 >> 1, 0xba >> 1, I2C_CLIENT_END };
+
+
+I2C_CLIENT_INSMOD;
+
+enum {
+ R00 = 0, R01, R02, R03, R04,
+ R05, R06, R07, R08, R09,
+ R0A, R0B, R0C, R0D, R0E, R0F,
+ R10, R11, R12, R13, R14,
+ R15, R16,
+ TOT_REGS
+};
+
+struct upd64083_state {
+ u8 mode;
+ u8 ext_y_adc;
+ u8 regs[TOT_REGS];
+};
+
+/* Initial values when used in combination with the
+ NEC upd64031a ghost reduction chip. */
+static u8 upd64083_init[] = {
+ 0x1f, 0x01, 0xa0, 0x2d, 0x29, /* we use EXCSS=0 */
+ 0x36, 0xdd, 0x05, 0x56, 0x48,
+ 0x00, 0x3a, 0xa0, 0x05, 0x08,
+ 0x44, 0x60, 0x08, 0x52, 0xf8,
+ 0x53, 0x60, 0x10
+};
+
+/* ------------------------------------------------------------------------ */
+
+static void upd64083_log_status(struct i2c_client *client)
+{
+ u8 buf[7];
+
+ i2c_master_recv(client, buf, 7);
+ v4l_info(client, "Status: SA00=%02x SA01=%02x SA02=%02x SA03=%02x "
+ "SA04=%02x SA05=%02x SA06=%02x\n",
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void upd64083_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ u8 buf[2];
+
+ buf[0] = reg;
+ buf[1] = val;
+ v4l_dbg(1, debug, client, "writing reg addr: %02x val: %02x\n", reg, val);
+ if (i2c_master_send(client, buf, 2) != 2)
+ v4l_err(client, "I/O error write 0x%02x/0x%02x\n", reg, val);
+}
+
+/* ------------------------------------------------------------------------ */
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static u8 upd64083_read(struct i2c_client *client, u8 reg)
+{
+ u8 buf[7];
+
+ if (reg >= sizeof(buf))
+ return 0xff;
+ i2c_master_recv(client, buf, sizeof(buf));
+ return buf[reg];
+}
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+static int upd64083_command(struct i2c_client *client, unsigned int cmd, void *arg)
+{
+ struct upd64083_state *state = i2c_get_clientdata(client);
+ struct v4l2_routing *route = arg;
+
+ switch (cmd) {
+ case VIDIOC_INT_G_VIDEO_ROUTING:
+ route->input = (state->mode >> 6) | (state->ext_y_adc >> 3);
+ route->output = 0;
+ break;
+
+ case VIDIOC_INT_S_VIDEO_ROUTING:
+ {
+ u8 r00, r02;
+
+ if (route->input > 7 || (route->input & 6) == 6)
+ return -EINVAL;
+ state->mode = (route->input & 3) << 6;
+ state->ext_y_adc = (route->input & UPD64083_EXT_Y_ADC) << 3;
+ r00 = (state->regs[R00] & ~(3 << 6)) | state->mode;
+ r02 = (state->regs[R02] & ~(1 << 5)) | state->ext_y_adc;
+ upd64083_write(client, R00, r00);
+ upd64083_write(client, R02, r02);
+ break;
+ }
+
+ case VIDIOC_LOG_STATUS:
+ upd64083_log_status(client);
+ break;
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ case VIDIOC_INT_G_REGISTER:
+ {
+ struct v4l2_register *reg = arg;
+
+ if (reg->i2c_id != I2C_DRIVERID_UPD64083)
+ return -EINVAL;
+ reg->val = upd64083_read(client, reg->reg & 0xff);
+ break;
+ }
+
+ case VIDIOC_INT_S_REGISTER:
+ {
+ struct v4l2_register *reg = arg;
+ u8 addr = reg->reg & 0xff;
+ u8 val = reg->val & 0xff;
+
+ if (reg->i2c_id != I2C_DRIVERID_UPD64083)
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ upd64083_write(client, addr, val);
+ break;
+ }
+#endif
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* i2c implementation */
+
+static struct i2c_driver i2c_driver;
+
+static int upd64083_attach(struct i2c_adapter *adapter, int address, int kind)
+{
+ struct i2c_client *client;
+ struct upd64083_state *state;
+ int i;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return 0;
+
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ if (client == NULL) {
+ return -ENOMEM;
+ }
+
+ client->addr = address;
+ client->adapter = adapter;
+ client->driver = &i2c_driver;
+ snprintf(client->name, sizeof(client->name) - 1, "uPD64083");
+
+ v4l_info(client, "chip found @ 0x%x (%s)\n", address << 1, adapter->name);
+
+ state = kmalloc(sizeof(struct upd64083_state), GFP_KERNEL);
+ if (state == NULL) {
+ kfree(client);
+ return -ENOMEM;
+ }
+ i2c_set_clientdata(client, state);
+ /* Initially assume that a ghost reduction chip is present */
+ state->mode = 0; /* YCS mode */
+ state->ext_y_adc = (1 << 5);
+ memcpy(state->regs, upd64083_init, TOT_REGS);
+ for (i = 0; i < TOT_REGS; i++) {
+ upd64083_write(client, i, state->regs[i]);
+ }
+ i2c_attach_client(client);
+
+ return 0;
+}
+
+static int upd64083_probe(struct i2c_adapter *adapter)
+{
+ if (adapter->class & I2C_CLASS_TV_ANALOG)
+ return i2c_probe(adapter, &addr_data, upd64083_attach);
+ return 0;
+}
+
+static int upd64083_detach(struct i2c_client *client)
+{
+ int err;
+
+ err = i2c_detach_client(client);
+ if (err)
+ return err;
+
+ kfree(client);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* i2c implementation */
+static struct i2c_driver i2c_driver = {
+ .driver = {
+ .name = "upd64083",
+ },
+ .id = I2C_DRIVERID_UPD64083,
+ .attach_adapter = upd64083_probe,
+ .detach_client = upd64083_detach,
+ .command = upd64083_command,
+};
+
+
+static int __init upd64083_init_module(void)
+{
+ return i2c_add_driver(&i2c_driver);
+}
+
+static void __exit upd64083_exit_module(void)
+{
+ i2c_del_driver(&i2c_driver);
+}
+
+module_init(upd64083_init_module);
+module_exit(upd64083_exit_module);
diff --git a/drivers/media/video/usbvideo/Kconfig b/drivers/media/video/usbvideo/Kconfig
new file mode 100644
index 0000000000000..08a5d20bb2c0d
--- /dev/null
+++ b/drivers/media/video/usbvideo/Kconfig
@@ -0,0 +1,38 @@
+config VIDEO_USBVIDEO
+ tristate
+
+config USB_VICAM
+ tristate "USB 3com HomeConnect (aka vicam) support (EXPERIMENTAL)"
+ depends on USB && VIDEO_DEV && EXPERIMENTAL
+ select VIDEO_USBVIDEO
+ ---help---
+ Say Y here if you have a 3com HomeConnect camera (vicam).
+
+ To compile this driver as a module, choose M here: the
+ module will be called vicam.
+
+config USB_IBMCAM
+ tristate "USB IBM (Xirlink) C-it Camera support"
+ depends on USB && VIDEO_DEV
+ select VIDEO_USBVIDEO
+ ---help---
+ Say Y here if you want to connect an IBM "C-It" camera, also known as
+ "Xirlink PC Camera" to your computer's USB port.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ibmcam.
+
+ This camera has several configuration options which
+ can be specified when you load the module. Read
+ <file:Documentation/video4linux/ibmcam.txt> to learn more.
+
+config USB_KONICAWC
+ tristate "USB Konica Webcam support"
+ depends on USB && VIDEO_DEV
+ select VIDEO_USBVIDEO
+ ---help---
+ Say Y here if you want support for webcams based on a Konica
+ chipset. This is known to work with the Intel YC76 webcam.
+
+ To compile this driver as a module, choose M here: the
+ module will be called konicawc.
diff --git a/drivers/media/video/usbvideo/Makefile b/drivers/media/video/usbvideo/Makefile
index ed410a5ee8c9c..bb52eb8dc2f9c 100644
--- a/drivers/media/video/usbvideo/Makefile
+++ b/drivers/media/video/usbvideo/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_USB_IBMCAM) += ibmcam.o usbvideo.o ultracam.o
-obj-$(CONFIG_USB_KONICAWC) += konicawc.o usbvideo.o
-obj-$(CONFIG_USB_VICAM) += vicam.o usbvideo.o
-
+obj-$(CONFIG_VIDEO_USBVIDEO) += usbvideo.o
+obj-$(CONFIG_USB_IBMCAM) += ibmcam.o ultracam.o
+obj-$(CONFIG_USB_KONICAWC) += konicawc.o
+obj-$(CONFIG_USB_VICAM) += vicam.o
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 11a97f30b8764..d330fa985bcc2 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -317,6 +317,7 @@ static const char *v4l2_int_ioctls[] = {
[_IOC_NR(TUNER_SET_STANDBY)] = "TUNER_SET_STANDBY",
[_IOC_NR(TDA9887_SET_CONFIG)] = "TDA9887_SET_CONFIG",
+ [_IOC_NR(VIDIOC_INT_S_TUNER_MODE)] = "VIDIOC_INT_S_TUNER_MODE",
[_IOC_NR(VIDIOC_INT_S_REGISTER)] = "VIDIOC_INT_S_REGISTER",
[_IOC_NR(VIDIOC_INT_G_REGISTER)] = "VIDIOC_INT_G_REGISTER",
[_IOC_NR(VIDIOC_INT_RESET)] = "VIDIOC_INT_RESET",
@@ -325,7 +326,12 @@ static const char *v4l2_int_ioctls[] = {
[_IOC_NR(VIDIOC_INT_S_VBI_DATA)] = "VIDIOC_INT_S_VBI_DATA",
[_IOC_NR(VIDIOC_INT_G_VBI_DATA)] = "VIDIOC_INT_G_VBI_DATA",
[_IOC_NR(VIDIOC_INT_G_CHIP_IDENT)] = "VIDIOC_INT_G_CHIP_IDENT",
- [_IOC_NR(VIDIOC_INT_I2S_CLOCK_FREQ)] = "VIDIOC_INT_I2S_CLOCK_FREQ"
+ [_IOC_NR(VIDIOC_INT_I2S_CLOCK_FREQ)] = "VIDIOC_INT_I2S_CLOCK_FREQ",
+ [_IOC_NR(VIDIOC_INT_S_STANDBY)] = "VIDIOC_INT_S_STANDBY",
+ [_IOC_NR(VIDIOC_INT_S_AUDIO_ROUTING)] = "VIDIOC_INT_S_AUDIO_ROUTING",
+ [_IOC_NR(VIDIOC_INT_G_AUDIO_ROUTING)] = "VIDIOC_INT_G_AUDIO_ROUTING",
+ [_IOC_NR(VIDIOC_INT_S_VIDEO_ROUTING)] = "VIDIOC_INT_S_VIDEO_ROUTING",
+ [_IOC_NR(VIDIOC_INT_G_VIDEO_ROUTING)] = "VIDIOC_INT_G_VIDEO_ROUTING"
};
#define V4L2_INT_IOCTLS ARRAY_SIZE(v4l2_int_ioctls)
diff --git a/drivers/media/video/video-buf.c b/drivers/media/video/video-buf.c
index d2ca0f08d0df6..acc5ea936687a 100644
--- a/drivers/media/video/video-buf.c
+++ b/drivers/media/video/video-buf.c
@@ -399,19 +399,25 @@ void videobuf_queue_pci(struct videobuf_queue* q)
int videobuf_pci_dma_map(struct pci_dev *pci,struct videobuf_dmabuf *dma)
{
struct videobuf_queue q;
+ struct videobuf_queue_ops qops;
q.dev=pci;
- q.ops->vb_map_sg=(vb_map_sg_t *)pci_unmap_sg;
+ qops.vb_map_sg=(vb_map_sg_t *)pci_map_sg;
+ qops.vb_unmap_sg=(vb_map_sg_t *)pci_unmap_sg;
+ q.ops = &qops;
- return (videobuf_dma_unmap(&q,dma));
+ return (videobuf_dma_map(&q,dma));
}
int videobuf_pci_dma_unmap(struct pci_dev *pci,struct videobuf_dmabuf *dma)
{
struct videobuf_queue q;
+ struct videobuf_queue_ops qops;
q.dev=pci;
- q.ops->vb_map_sg=(vb_map_sg_t *)pci_unmap_sg;
+ qops.vb_map_sg=(vb_map_sg_t *)pci_map_sg;
+ qops.vb_unmap_sg=(vb_map_sg_t *)pci_unmap_sg;
+ q.ops = &qops;
return (videobuf_dma_unmap(&q,dma));
}
@@ -923,7 +929,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
/* need to capture a new frame */
retval = -ENOMEM;
q->read_buf = videobuf_alloc(q->msize);
- dprintk(1,"video alloc=0x%08x\n",(unsigned int) q->read_buf);
+ dprintk(1,"video alloc=0x%p\n", q->read_buf);
if (NULL == q->read_buf)
goto done;
q->read_buf->memory = V4L2_MEMORY_USERPTR;
diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
new file mode 100644
index 0000000000000..a9b59c35cd67e
--- /dev/null
+++ b/drivers/media/video/wm8739.c
@@ -0,0 +1,355 @@
+/*
+ * wm8739
+ *
+ * Copyright (C) 2005 T. Adachi <tadachi@tadachi-net.com>
+ *
+ * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * - Cleanup
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <asm/uaccess.h>
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/videodev.h>
+#include <media/v4l2-common.h>
+
+MODULE_DESCRIPTION("wm8739 driver");
+MODULE_AUTHOR("T. Adachi, Hans Verkuil");
+MODULE_LICENSE("GPL");
+
+static int debug = 0;
+static unsigned short normal_i2c[] = { 0x34 >> 1, 0x36 >> 1, I2C_CLIENT_END };
+
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+
+I2C_CLIENT_INSMOD;
+
+/* ------------------------------------------------------------------------ */
+
+enum {
+ R0 = 0, R1,
+ R5 = 5, R6, R7, R8, R9, R15 = 15,
+ TOT_REGS
+};
+
+struct wm8739_state {
+ u32 clock_freq;
+ u8 muted;
+ u16 volume;
+ u16 balance;
+ u8 vol_l; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */
+ u8 vol_r; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */
+};
+
+/* ------------------------------------------------------------------------ */
+
+static int wm8739_write(struct i2c_client *client, int reg, u16 val)
+{
+ int i;
+
+ if (reg < 0 || reg >= TOT_REGS) {
+ v4l_err(client, "Invalid register R%d\n", reg);
+ return -1;
+ }
+
+ v4l_dbg(1, debug, client, "write: %02x %02x\n", reg, val);
+
+ for (i = 0; i < 3; i++) {
+ if (i2c_smbus_write_byte_data(client, (reg << 1) |
+ (val >> 8), val & 0xff) == 0) {
+ return 0;
+ }
+ }
+ v4l_err(client, "I2C: cannot write %03x to register R%d\n", val, reg);
+ return -1;
+}
+
+/* write regs to set audio volume etc */
+static void wm8739_set_audio(struct i2c_client *client)
+{
+ struct wm8739_state *state = i2c_get_clientdata(client);
+ u16 mute = state->muted ? 0x80 : 0;
+
+ /* Volume setting: bits 0-4, 0x1f = 12 dB, 0x00 = -34.5 dB
+ * Default setting: 0x17 = 0 dB
+ */
+ wm8739_write(client, R0, (state->vol_l & 0x1f) | mute);
+ wm8739_write(client, R1, (state->vol_r & 0x1f) | mute);
+}
+
+static int wm8739_get_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
+{
+ struct wm8739_state *state = i2c_get_clientdata(client);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ ctrl->value = state->muted;
+ break;
+
+ case V4L2_CID_AUDIO_VOLUME:
+ ctrl->value = state->volume;
+ break;
+
+ case V4L2_CID_AUDIO_BALANCE:
+ ctrl->value = state->balance;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int wm8739_set_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
+{
+ struct wm8739_state *state = i2c_get_clientdata(client);
+ unsigned int work_l, work_r;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ state->muted = ctrl->value;
+ break;
+
+ case V4L2_CID_AUDIO_VOLUME:
+ state->volume = ctrl->value;
+ break;
+
+ case V4L2_CID_AUDIO_BALANCE:
+ state->balance = ctrl->value;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* normalize ( 65535 to 0 -> 31 to 0 (12dB to -34.5dB) ) */
+ work_l = (min(65536 - state->balance, 32768) * state->volume) / 32768;
+ work_r = (min(state->balance, (u16)32768) * state->volume) / 32768;
+
+ state->vol_l = (long)work_l * 31 / 65535;
+ state->vol_r = (long)work_r * 31 / 65535;
+
+ /* set audio volume etc. */
+ wm8739_set_audio(client);
+ return 0;
+}
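
As an aside, a minimal standalone sketch of the normalization performed in wm8739_set_ctrl() above: the 16-bit volume and balance controls are scaled down to the chip's 5-bit attenuator range (0x00 = -34.5 dB ... 0x1f = +12 dB). The sample values and the map_volume()/main() wrappers are illustrative only, not part of the driver.

	/* Illustrative only: mirrors the balance/volume arithmetic above. */
	#include <stdio.h>

	static void map_volume(unsigned int volume, unsigned int balance)
	{
		unsigned int left = 65536 - balance;
		unsigned int work_l, work_r;

		if (left > 32768)
			left = 32768;
		work_l = (left * volume) / 32768;
		work_r = ((balance > 32768 ? 32768 : balance) * volume) / 32768;

		/* scale 0..65535 down to the 5-bit register range 0..31 */
		printf("vol_l=%lu vol_r=%lu\n",
		       (unsigned long)work_l * 31 / 65535,
		       (unsigned long)work_r * 31 / 65535);
	}

	int main(void)
	{
		map_volume(58880, 32768);	/* default volume, centered balance */
		return 0;
	}
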
+
+/* ------------------------------------------------------------------------ */
+
+static struct v4l2_queryctrl wm8739_qctrl[] = {
+ {
+ .id = V4L2_CID_AUDIO_VOLUME,
+ .name = "Volume",
+ .minimum = 0,
+ .maximum = 65535,
+ .step = 65535/100,
+ .default_value = 58880,
+ .flags = 0,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ },{
+ .id = V4L2_CID_AUDIO_MUTE,
+ .name = "Mute",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ .flags = 0,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ },{
+ .id = V4L2_CID_AUDIO_BALANCE,
+ .name = "Balance",
+ .minimum = 0,
+ .maximum = 65535,
+ .step = 65535/100,
+ .default_value = 32768,
+ .flags = 0,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ }
+};
+
+/* ------------------------------------------------------------------------ */
+
+static int wm8739_command(struct i2c_client *client, unsigned int cmd, void *arg)
+{
+ struct wm8739_state *state = i2c_get_clientdata(client);
+
+ switch (cmd) {
+ case VIDIOC_INT_AUDIO_CLOCK_FREQ:
+ {
+ u32 audiofreq = *(u32 *)arg;
+
+ state->clock_freq = audiofreq;
+ wm8739_write(client, R9, 0x000); /* de-activate */
+ switch (audiofreq) {
+ case 44100:
+ wm8739_write(client, R8, 0x020); /* 256fps, fs=44.1k */
+ break;
+ case 48000:
+ wm8739_write(client, R8, 0x000); /* 256fps, fs=48k */
+ break;
+ case 32000:
+ wm8739_write(client, R8, 0x018); /* 256fps, fs=32k */
+ break;
+ default:
+ break;
+ }
+ wm8739_write(client, R9, 0x001); /* activate */
+ break;
+ }
+
+ case VIDIOC_G_CTRL:
+ return wm8739_get_ctrl(client, arg);
+
+ case VIDIOC_S_CTRL:
+ return wm8739_set_ctrl(client, arg);
+
+ case VIDIOC_QUERYCTRL:
+ {
+ struct v4l2_queryctrl *qc = arg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm8739_qctrl); i++)
+ if (qc->id && qc->id == wm8739_qctrl[i].id) {
+ memcpy(qc, &wm8739_qctrl[i], sizeof(*qc));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ case VIDIOC_LOG_STATUS:
+ v4l_info(client, "Frequency: %u Hz\n", state->clock_freq);
+ v4l_info(client, "Volume L: %02x%s\n", state->vol_l & 0x1f,
+ state->muted ? " (muted)" : "");
+ v4l_info(client, "Volume R: %02x%s\n", state->vol_r & 0x1f,
+ state->muted ? " (muted)" : "");
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* i2c implementation */
+
+static struct i2c_driver i2c_driver;
+
+static int wm8739_attach(struct i2c_adapter *adapter, int address, int kind)
+{
+ struct i2c_client *client;
+ struct wm8739_state *state;
+
+ /* Check if the adapter supports the needed features */
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return 0;
+
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ if (client == NULL)
+ return -ENOMEM;
+
+ client->addr = address;
+ client->adapter = adapter;
+ client->driver = &i2c_driver;
+ snprintf(client->name, sizeof(client->name) - 1, "wm8739");
+
+ v4l_info(client, "chip found @ 0x%x (%s)\n", address << 1, adapter->name);
+
+ state = kmalloc(sizeof(struct wm8739_state), GFP_KERNEL);
+ if (state == NULL) {
+ kfree(client);
+ return -ENOMEM;
+ }
+ state->vol_l = 0x17; /* 0dB */
+ state->vol_r = 0x17; /* 0dB */
+ state->muted = 0;
+ state->balance = 32768;
+ /* normalize (12dB(31) to -34.5dB(0) [0dB(23)] -> 65535 to 0) */
+ state->volume = ((long)state->vol_l + 1) * 65535 / 31;
+ state->clock_freq = 48000;
+ i2c_set_clientdata(client, state);
+
+ /* initialize wm8739 */
+ wm8739_write(client, R15, 0x00); /* reset */
+ wm8739_write(client, R5, 0x000); /* filter setting, high pass, offset clear */
+ wm8739_write(client, R6, 0x000); /* ADC, OSC, Power Off mode Disable */
+ wm8739_write(client, R7, 0x049); /* Digital Audio interface format */
+ /* Enable Master mode */
+ /* 24 bit, MSB first/left justified */
+ wm8739_write(client, R8, 0x000); /* sampling control */
+ /* normal, 256fs, 48KHz sampling rate */
+ wm8739_write(client, R9, 0x001); /* activate */
+ wm8739_set_audio(client); /* set volume/mute */
+
+ i2c_attach_client(client);
+
+ return 0;
+}
+
+static int wm8739_probe(struct i2c_adapter *adapter)
+{
+ if (adapter->class & I2C_CLASS_TV_ANALOG)
+ return i2c_probe(adapter, &addr_data, wm8739_attach);
+ return 0;
+}
+
+static int wm8739_detach(struct i2c_client *client)
+{
+ int err;
+
+ err = i2c_detach_client(client);
+ if (err)
+ return err;
+
+ kfree(client);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* i2c implementation */
+static struct i2c_driver i2c_driver = {
+ .driver = {
+ .name = "wm8739",
+ },
+ .id = I2C_DRIVERID_WM8739,
+ .attach_adapter = wm8739_probe,
+ .detach_client = wm8739_detach,
+ .command = wm8739_command,
+};
+
+
+static int __init wm8739_init_module(void)
+{
+ return i2c_add_driver(&i2c_driver);
+}
+
+static void __exit wm8739_cleanup_module(void)
+{
+ i2c_del_driver(&i2c_driver);
+}
+
+module_init(wm8739_init_module);
+module_exit(wm8739_cleanup_module);
diff --git a/drivers/media/video/zc0301/Kconfig b/drivers/media/video/zc0301/Kconfig
new file mode 100644
index 0000000000000..c3bf886b80cdb
--- /dev/null
+++ b/drivers/media/video/zc0301/Kconfig
@@ -0,0 +1,11 @@
+config USB_ZC0301
+ tristate "USB ZC0301 Image Processor and Control Chip support"
+ depends on USB && VIDEO_DEV
+ ---help---
+ Say Y here if you want support for cameras based on the ZC0301
+ Image Processor and Control Chip.
+
+ See <file:Documentation/video4linux/zc0301.txt> for more info.
+
+ To compile this driver as a module, choose M here: the
+ module will be called zc0301.
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 3f5d77f633fa8..7cc162e8978be 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -60,6 +60,17 @@ config MMC_SDHCI
If unsure, say N.
+config MMC_OMAP
+ tristate "TI OMAP Multimedia Card Interface support"
+ depends on ARCH_OMAP && MMC
+ select TPS65010 if MACH_OMAP_H2
+ help
+ This selects the TI OMAP Multimedia Card Interface.
+ If you have an OMAP board with a Multimedia Card slot,
+ say Y or M here.
+
+ If unsure, say N.
+
config MMC_WBSD
tristate "Winbond W83L51xD SD/MMC Card Interface support"
depends on MMC && ISA_DMA_API
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 769d545284a43..c7c34aadfc92c 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -20,5 +20,10 @@ obj-$(CONFIG_MMC_PXA) += pxamci.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
+obj-$(CONFIG_MMC_OMAP) += omap.o
mmc_core-y := mmc.o mmc_queue.o mmc_sysfs.o
+
+ifeq ($(CONFIG_MMC_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 85e89c77bdea2..c0326bbc5f283 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -56,12 +56,11 @@
#define DRIVER_NAME "au1xxx-mmc"
/* Set this to enable special debugging macros */
-/* #define MMC_DEBUG */
-#ifdef MMC_DEBUG
-#define DEBUG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
+#ifdef DEBUG
+#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
#else
-#define DEBUG(fmt, idx, args...)
+#define DBG(fmt, idx, args...)
#endif
const struct {
@@ -424,18 +423,18 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
break;
if (status & SD_STATUS_RC) {
- DEBUG("RX CRC Error [%d + %d].\n", host->id,
+ DBG("RX CRC Error [%d + %d].\n", host->id,
host->pio.len, count);
break;
}
if (status & SD_STATUS_RO) {
- DEBUG("RX Overrun [%d + %d]\n", host->id,
+ DBG("RX Overrun [%d + %d]\n", host->id,
host->pio.len, count);
break;
}
else if (status & SD_STATUS_RU) {
- DEBUG("RX Underrun [%d + %d]\n", host->id,
+ DBG("RX Underrun [%d + %d]\n", host->id,
host->pio.len, count);
break;
}
@@ -721,7 +720,7 @@ static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
{
struct au1xmmc_host *host = mmc_priv(mmc);
- DEBUG("set_ios (power=%u, clock=%uHz, vdd=%u, mode=%u)\n",
+ DBG("set_ios (power=%u, clock=%uHz, vdd=%u, mode=%u)\n",
host->id, ios->power_mode, ios->clock, ios->vdd,
ios->bus_mode);
@@ -810,7 +809,7 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id, struct pt_regs *regs)
au1xmmc_receive_pio(host);
}
else if (status & 0x203FBC70) {
- DEBUG("Unhandled status %8.8x\n", host->id, status);
+ DBG("Unhandled status %8.8x\n", host->id, status);
handled = 0;
}
@@ -839,7 +838,7 @@ static void au1xmmc_poll_event(unsigned long arg)
if (host->mrq != NULL) {
u32 status = au_readl(HOST_STATUS(host));
- DEBUG("PENDING - %8.8x\n", host->id, status);
+ DBG("PENDING - %8.8x\n", host->id, status);
}
mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT);
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 1888060c5e0c4..da6ddd910fc51 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -27,12 +27,6 @@
#include "mmc.h"
-#ifdef CONFIG_MMC_DEBUG
-#define DBG(x...) printk(KERN_DEBUG x)
-#else
-#define DBG(x...) do { } while (0)
-#endif
-
#define CMD_RETRIES 3
/*
@@ -77,8 +71,9 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
int err = mrq->cmd->error;
- DBG("MMC: req done (%02x): %d: %08x %08x %08x %08x\n", cmd->opcode,
- err, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
+ pr_debug("MMC: req done (%02x): %d: %08x %08x %08x %08x\n",
+ cmd->opcode, err, cmd->resp[0], cmd->resp[1],
+ cmd->resp[2], cmd->resp[3]);
if (err && cmd->retries) {
cmd->retries--;
@@ -102,8 +97,8 @@ EXPORT_SYMBOL(mmc_request_done);
void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
- DBG("MMC: starting cmd %02x arg %08x flags %08x\n",
- mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+ pr_debug("MMC: starting cmd %02x arg %08x flags %08x\n",
+ mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
WARN_ON(host->card_busy == NULL);
@@ -976,8 +971,8 @@ static unsigned int mmc_calculate_clock(struct mmc_host *host)
if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
max_dtr = card->csd.max_dtr;
- DBG("MMC: selected %d.%03dMHz transfer rate\n",
- max_dtr / 1000000, (max_dtr / 1000) % 1000);
+ pr_debug("MMC: selected %d.%03dMHz transfer rate\n",
+ max_dtr / 1000000, (max_dtr / 1000) % 1000);
return max_dtr;
}
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index 9fef29d978b5e..df7e861e2fc75 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -33,12 +33,8 @@
#define DRIVER_NAME "mmci-pl18x"
-#ifdef CONFIG_MMC_DEBUG
#define DBG(host,fmt,args...) \
pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)
-#else
-#define DBG(host,fmt,args...) do { } while (0)
-#endif
static unsigned int fmax = 515633;
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c
new file mode 100644
index 0000000000000..becb3c68c34d7
--- /dev/null
+++ b/drivers/mmc/omap.c
@@ -0,0 +1,1226 @@
+/*
+ * linux/drivers/mmc/omap.c
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
+ * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
+ * Other hacks (DMA, SD, etc) by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/protocol.h>
+#include <linux/mmc/card.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/scatterlist.h>
+#include <asm/mach-types.h>
+
+#include <asm/arch/board.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/fpga.h>
+#include <asm/arch/tps65010.h>
+
+#include "omap.h"
+
+#define DRIVER_NAME "mmci-omap"
+#define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
+
+/* Specifies how often in millisecs to poll for card status changes
+ * when the cover switch is open */
+#define OMAP_MMC_SWITCH_POLL_DELAY 500
+
+static int mmc_omap_enable_poll = 1;
+
+struct mmc_omap_host {
+ int initialized;
+ int suspended;
+ struct mmc_request * mrq;
+ struct mmc_command * cmd;
+ struct mmc_data * data;
+ struct mmc_host * mmc;
+ struct device * dev;
+ unsigned char id; /* 16xx chips have 2 MMC blocks */
+ struct clk * iclk;
+ struct clk * fclk;
+ void __iomem *base;
+ int irq;
+ unsigned char bus_mode;
+ unsigned char hw_bus_mode;
+
+ unsigned int sg_len;
+ int sg_idx;
+ u16 * buffer;
+ u32 buffer_bytes_left;
+ u32 total_bytes_left;
+
+ unsigned use_dma:1;
+ unsigned brs_received:1, dma_done:1;
+ unsigned dma_is_read:1;
+ unsigned dma_in_use:1;
+ int dma_ch;
+ spinlock_t dma_lock;
+ struct timer_list dma_timer;
+ unsigned dma_len;
+
+ short power_pin;
+ short wp_pin;
+
+ int switch_pin;
+ struct work_struct switch_work;
+ struct timer_list switch_timer;
+ int switch_last_state;
+};
+
+static inline int
+mmc_omap_cover_is_open(struct mmc_omap_host *host)
+{
+ if (host->switch_pin < 0)
+ return 0;
+ return omap_get_gpio_datain(host->switch_pin);
+}
+
+static ssize_t
+mmc_omap_show_cover_switch(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_omap_host *host = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", mmc_omap_cover_is_open(host) ? "open" :
+ "closed");
+}
+
+static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
+
+static ssize_t
+mmc_omap_show_enable_poll(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", mmc_omap_enable_poll);
+}
+
+static ssize_t
+mmc_omap_store_enable_poll(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ int enable_poll;
+
+ if (sscanf(buf, "%10d", &enable_poll) != 1)
+ return -EINVAL;
+
+ if (enable_poll != mmc_omap_enable_poll) {
+ struct mmc_omap_host *host = dev_get_drvdata(dev);
+
+ mmc_omap_enable_poll = enable_poll;
+ if (enable_poll && host->switch_pin >= 0)
+ schedule_work(&host->switch_work);
+ }
+ return size;
+}
+
+static DEVICE_ATTR(enable_poll, 0664,
+ mmc_omap_show_enable_poll, mmc_omap_store_enable_poll);
+
+static void
+mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
+{
+ u32 cmdreg;
+ u32 resptype;
+ u32 cmdtype;
+
+ host->cmd = cmd;
+
+ resptype = 0;
+ cmdtype = 0;
+
+ /* Our hardware needs to know exact type */
+ switch (RSP_TYPE(mmc_resp_type(cmd))) {
+ case RSP_TYPE(MMC_RSP_R1):
+ /* resp 1, resp 1b */
+ resptype = 1;
+ break;
+ case RSP_TYPE(MMC_RSP_R2):
+ resptype = 2;
+ break;
+ case RSP_TYPE(MMC_RSP_R3):
+ resptype = 3;
+ break;
+ default:
+ break;
+ }
+
+ if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
+ cmdtype = OMAP_MMC_CMDTYPE_ADTC;
+ } else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
+ cmdtype = OMAP_MMC_CMDTYPE_BC;
+ } else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
+ cmdtype = OMAP_MMC_CMDTYPE_BCR;
+ } else {
+ cmdtype = OMAP_MMC_CMDTYPE_AC;
+ }
+
+ cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
+
+ if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ cmdreg |= 1 << 6;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ cmdreg |= 1 << 11;
+
+ if (host->data && !(host->data->flags & MMC_DATA_WRITE))
+ cmdreg |= 1 << 15;
+
+ clk_enable(host->fclk);
+
+ OMAP_MMC_WRITE(host->base, CTO, 200);
+ OMAP_MMC_WRITE(host->base, ARGL, cmd->arg & 0xffff);
+ OMAP_MMC_WRITE(host->base, ARGH, cmd->arg >> 16);
+ OMAP_MMC_WRITE(host->base, IE,
+ OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL |
+ OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT |
+ OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT |
+ OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR |
+ OMAP_MMC_STAT_END_OF_DATA);
+ OMAP_MMC_WRITE(host->base, CMD, cmdreg);
+}
+
+static void
+mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
+{
+ if (host->dma_in_use) {
+ enum dma_data_direction dma_data_dir;
+
+ BUG_ON(host->dma_ch < 0);
+ if (data->error != MMC_ERR_NONE)
+ omap_stop_dma(host->dma_ch);
+ /* Release DMA channel lazily */
+ mod_timer(&host->dma_timer, jiffies + HZ);
+ if (data->flags & MMC_DATA_WRITE)
+ dma_data_dir = DMA_TO_DEVICE;
+ else
+ dma_data_dir = DMA_FROM_DEVICE;
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
+ dma_data_dir);
+ }
+ host->data = NULL;
+ host->sg_len = 0;
+ clk_disable(host->fclk);
+
+ /* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
+ * dozens of requests until the card finishes writing data.
+ * It'd be cheaper to just wait till an EOFB interrupt arrives...
+ */
+
+ if (!data->stop) {
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, data->mrq);
+ return;
+ }
+
+ mmc_omap_start_command(host, data->stop);
+}
+
+static void
+mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
+{
+ unsigned long flags;
+ int done;
+
+ if (!host->dma_in_use) {
+ mmc_omap_xfer_done(host, data);
+ return;
+ }
+ done = 0;
+ spin_lock_irqsave(&host->dma_lock, flags);
+ if (host->dma_done)
+ done = 1;
+ else
+ host->brs_received = 1;
+ spin_unlock_irqrestore(&host->dma_lock, flags);
+ if (done)
+ mmc_omap_xfer_done(host, data);
+}
+
+static void
+mmc_omap_dma_timer(unsigned long data)
+{
+ struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+
+ BUG_ON(host->dma_ch < 0);
+ omap_free_dma(host->dma_ch);
+ host->dma_ch = -1;
+}
+
+static void
+mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
+{
+ unsigned long flags;
+ int done;
+
+ done = 0;
+ spin_lock_irqsave(&host->dma_lock, flags);
+ if (host->brs_received)
+ done = 1;
+ else
+ host->dma_done = 1;
+ spin_unlock_irqrestore(&host->dma_lock, flags);
+ if (done)
+ mmc_omap_xfer_done(host, data);
+}
+
+static void
+mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
+{
+ host->cmd = NULL;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ /* response type 2 */
+ cmd->resp[3] =
+ OMAP_MMC_READ(host->base, RSP0) |
+ (OMAP_MMC_READ(host->base, RSP1) << 16);
+ cmd->resp[2] =
+ OMAP_MMC_READ(host->base, RSP2) |
+ (OMAP_MMC_READ(host->base, RSP3) << 16);
+ cmd->resp[1] =
+ OMAP_MMC_READ(host->base, RSP4) |
+ (OMAP_MMC_READ(host->base, RSP5) << 16);
+ cmd->resp[0] =
+ OMAP_MMC_READ(host->base, RSP6) |
+ (OMAP_MMC_READ(host->base, RSP7) << 16);
+ } else {
+ /* response types 1, 1b, 3, 4, 5, 6 */
+ cmd->resp[0] =
+ OMAP_MMC_READ(host->base, RSP6) |
+ (OMAP_MMC_READ(host->base, RSP7) << 16);
+ }
+ }
+
+ if (host->data == NULL || cmd->error != MMC_ERR_NONE) {
+ host->mrq = NULL;
+ clk_disable(host->fclk);
+ mmc_request_done(host->mmc, cmd->mrq);
+ }
+}
+
+/* PIO only */
+static void
+mmc_omap_sg_to_buf(struct mmc_omap_host *host)
+{
+ struct scatterlist *sg;
+
+ sg = host->data->sg + host->sg_idx;
+ host->buffer_bytes_left = sg->length;
+ host->buffer = page_address(sg->page) + sg->offset;
+ if (host->buffer_bytes_left > host->total_bytes_left)
+ host->buffer_bytes_left = host->total_bytes_left;
+}
+
+/* PIO only */
+static void
+mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
+{
+ int n;
+ void __iomem *reg;
+ u16 *p;
+
+ if (host->buffer_bytes_left == 0) {
+ host->sg_idx++;
+ BUG_ON(host->sg_idx == host->sg_len);
+ mmc_omap_sg_to_buf(host);
+ }
+ n = 64;
+ if (n > host->buffer_bytes_left)
+ n = host->buffer_bytes_left;
+ host->buffer_bytes_left -= n;
+ host->total_bytes_left -= n;
+ host->data->bytes_xfered += n;
+
+ if (write) {
+ __raw_writesw(host->base + OMAP_MMC_REG_DATA, host->buffer, n);
+ } else {
+ __raw_readsw(host->base + OMAP_MMC_REG_DATA, host->buffer, n);
+ }
+}
+
+static inline void mmc_omap_report_irq(u16 status)
+{
+ static const char *mmc_omap_status_bits[] = {
+ "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
+ "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
+ };
+ int i, c = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
+ if (status & (1 << i)) {
+ if (c)
+ printk(" ");
+ printk("%s", mmc_omap_status_bits[i]);
+ c++;
+ }
+}
+
+static irqreturn_t mmc_omap_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
+ u16 status;
+ int end_command;
+ int end_transfer;
+ int transfer_error;
+
+ if (host->cmd == NULL && host->data == NULL) {
+ status = OMAP_MMC_READ(host->base, STAT);
+ dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status);
+ if (status != 0) {
+ OMAP_MMC_WRITE(host->base, STAT, status);
+ OMAP_MMC_WRITE(host->base, IE, 0);
+ }
+ return IRQ_HANDLED;
+ }
+
+ end_command = 0;
+ end_transfer = 0;
+ transfer_error = 0;
+
+ while ((status = OMAP_MMC_READ(host->base, STAT)) != 0) {
+ OMAP_MMC_WRITE(host->base, STAT, status);
+#ifdef CONFIG_MMC_DEBUG
+ dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
+ status, host->cmd != NULL ? host->cmd->opcode : -1);
+ mmc_omap_report_irq(status);
+ printk("\n");
+#endif
+ if (host->total_bytes_left) {
+ if ((status & OMAP_MMC_STAT_A_FULL) ||
+ (status & OMAP_MMC_STAT_END_OF_DATA))
+ mmc_omap_xfer_data(host, 0);
+ if (status & OMAP_MMC_STAT_A_EMPTY)
+ mmc_omap_xfer_data(host, 1);
+ }
+
+ if (status & OMAP_MMC_STAT_END_OF_DATA) {
+ end_transfer = 1;
+ }
+
+ if (status & OMAP_MMC_STAT_DATA_TOUT) {
+ dev_dbg(mmc_dev(host->mmc), "data timeout\n");
+ if (host->data) {
+ host->data->error |= MMC_ERR_TIMEOUT;
+ transfer_error = 1;
+ }
+ }
+
+ if (status & OMAP_MMC_STAT_DATA_CRC) {
+ if (host->data) {
+ host->data->error |= MMC_ERR_BADCRC;
+ dev_dbg(mmc_dev(host->mmc),
+ "data CRC error, bytes left %d\n",
+ host->total_bytes_left);
+ transfer_error = 1;
+ } else {
+ dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
+ }
+ }
+
+ if (status & OMAP_MMC_STAT_CMD_TOUT) {
+ /* Timeouts are routine with some commands */
+ if (host->cmd) {
+ if (host->cmd->opcode != MMC_ALL_SEND_CID &&
+ host->cmd->opcode !=
+ MMC_SEND_OP_COND &&
+ host->cmd->opcode !=
+ MMC_APP_CMD &&
+ !mmc_omap_cover_is_open(host))
+ dev_err(mmc_dev(host->mmc),
+ "command timeout, CMD %d\n",
+ host->cmd->opcode);
+ host->cmd->error = MMC_ERR_TIMEOUT;
+ end_command = 1;
+ }
+ }
+
+ if (status & OMAP_MMC_STAT_CMD_CRC) {
+ if (host->cmd) {
+ dev_err(mmc_dev(host->mmc),
+ "command CRC error (CMD%d, arg 0x%08x)\n",
+ host->cmd->opcode, host->cmd->arg);
+ host->cmd->error = MMC_ERR_BADCRC;
+ end_command = 1;
+ } else
+ dev_err(mmc_dev(host->mmc),
+ "command CRC error without cmd?\n");
+ }
+
+ if (status & OMAP_MMC_STAT_CARD_ERR) {
+ if (host->cmd && host->cmd->opcode == MMC_STOP_TRANSMISSION) {
+ u32 response = OMAP_MMC_READ(host->base, RSP6)
+ | (OMAP_MMC_READ(host->base, RSP7) << 16);
+ /* STOP sometimes sets must-ignore bits */
+ if (!(response & (R1_CC_ERROR
+ | R1_ILLEGAL_COMMAND
+ | R1_COM_CRC_ERROR))) {
+ end_command = 1;
+ continue;
+ }
+ }
+
+ if (host->cmd) {
+ dev_dbg(mmc_dev(host->mmc), "card status error (CMD%d)\n",
+ host->cmd->opcode);
+ host->cmd->error = MMC_ERR_FAILED;
+ end_command = 1;
+ }
+ if (host->data) {
+ host->data->error = MMC_ERR_FAILED;
+ transfer_error = 1;
+ }
+ }
+
+ /*
+ * NOTE: On 1610 the END_OF_CMD may come too early when
+ * starting a write
+ */
+ if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
+ (!(status & OMAP_MMC_STAT_A_EMPTY))) {
+ end_command = 1;
+ }
+ }
+
+ if (end_command) {
+ mmc_omap_cmd_done(host, host->cmd);
+ }
+ if (transfer_error)
+ mmc_omap_xfer_done(host, host->data);
+ else if (end_transfer)
+ mmc_omap_end_of_data(host, host->data);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mmc_omap_switch_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct mmc_omap_host *host = (struct mmc_omap_host *) dev_id;
+
+ schedule_work(&host->switch_work);
+
+ return IRQ_HANDLED;
+}
+
+static void mmc_omap_switch_timer(unsigned long arg)
+{
+ struct mmc_omap_host *host = (struct mmc_omap_host *) arg;
+
+ schedule_work(&host->switch_work);
+}
+
+/* FIXME: Handle card insertion and removal properly. Maybe use a mask
+ * for MMC state? */
+static void mmc_omap_switch_callback(unsigned long data, u8 mmc_mask)
+{
+}
+
+static void mmc_omap_switch_handler(void *data)
+{
+ struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ struct mmc_card *card;
+ static int complained = 0;
+ int cards = 0, cover_open;
+
+ if (host->switch_pin == -1)
+ return;
+ cover_open = mmc_omap_cover_is_open(host);
+ if (cover_open != host->switch_last_state) {
+ kobject_uevent(&host->dev->kobj, KOBJ_CHANGE);
+ host->switch_last_state = cover_open;
+ }
+ mmc_detect_change(host->mmc, 0);
+ list_for_each_entry(card, &host->mmc->cards, node) {
+ if (mmc_card_present(card))
+ cards++;
+ }
+ if (mmc_omap_cover_is_open(host)) {
+ if (!complained) {
+ dev_info(mmc_dev(host->mmc), "cover is open");
+ complained = 1;
+ }
+ if (mmc_omap_enable_poll)
+ mod_timer(&host->switch_timer, jiffies +
+ msecs_to_jiffies(OMAP_MMC_SWITCH_POLL_DELAY));
+ } else {
+ complained = 0;
+ }
+}
+
+/* Prepare to transfer the next segment of a scatterlist */
+static void
+mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
+{
+ int dma_ch = host->dma_ch;
+ unsigned long data_addr;
+ u16 buf, frame;
+ u32 count;
+ struct scatterlist *sg = &data->sg[host->sg_idx];
+ int src_port = 0;
+ int dst_port = 0;
+ int sync_dev = 0;
+
+ data_addr = io_v2p((u32) host->base) + OMAP_MMC_REG_DATA;
+ frame = 1 << data->blksz_bits;
+ count = sg_dma_len(sg);
+
+ if ((data->blocks == 1) && (count > (1 << data->blksz_bits)))
+ count = frame;
+
+ host->dma_len = count;
+
+ /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
+ * Use 16 or 32 word frames when the blocksize is at least that large.
+ * Blocksize is usually 512 bytes; but not for some SD reads.
+ */
+ if (cpu_is_omap15xx() && frame > 32)
+ frame = 32;
+ else if (frame > 64)
+ frame = 64;
+ count /= frame;
+ frame >>= 1;
+
+ if (!(data->flags & MMC_DATA_WRITE)) {
+ buf = 0x800f | ((frame - 1) << 8);
+
+ if (cpu_class_is_omap1()) {
+ src_port = OMAP_DMA_PORT_TIPB;
+ dst_port = OMAP_DMA_PORT_EMIFF;
+ }
+ if (cpu_is_omap24xx())
+ sync_dev = OMAP24XX_DMA_MMC1_RX;
+
+ omap_set_dma_src_params(dma_ch, src_port,
+ OMAP_DMA_AMODE_CONSTANT,
+ data_addr, 0, 0);
+ omap_set_dma_dest_params(dma_ch, dst_port,
+ OMAP_DMA_AMODE_POST_INC,
+ sg_dma_address(sg), 0, 0);
+ omap_set_dma_dest_data_pack(dma_ch, 1);
+ omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
+ } else {
+ buf = 0x0f80 | ((frame - 1) << 0);
+
+ if (cpu_class_is_omap1()) {
+ src_port = OMAP_DMA_PORT_EMIFF;
+ dst_port = OMAP_DMA_PORT_TIPB;
+ }
+ if (cpu_is_omap24xx())
+ sync_dev = OMAP24XX_DMA_MMC1_TX;
+
+ omap_set_dma_dest_params(dma_ch, dst_port,
+ OMAP_DMA_AMODE_CONSTANT,
+ data_addr, 0, 0);
+ omap_set_dma_src_params(dma_ch, src_port,
+ OMAP_DMA_AMODE_POST_INC,
+ sg_dma_address(sg), 0, 0);
+ omap_set_dma_src_data_pack(dma_ch, 1);
+ omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
+ }
+
+ /* Max limit for DMA frame count is 0xffff */
+ if (unlikely(count > 0xffff))
+ BUG();
+
+ OMAP_MMC_WRITE(host->base, BUF, buf);
+ omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
+ frame, count, OMAP_DMA_SYNC_FRAME,
+ sync_dev, 0);
+}
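
A small worked example of the frame sizing in mmc_omap_prepare_dma() above, assuming a 16xx-class FIFO (32x2 bytes) and the usual 512-byte block size; the 4 KiB segment length and the standalone main() wrapper are illustrative only.

	/* Illustrative only: DMA frame sizing for one scatterlist segment. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int blksz = 512;	/* bytes per block (1 << blksz_bits) */
		unsigned int count = 4096;	/* bytes in this sg segment */
		unsigned int frame = blksz;

		if (frame > 64)			/* 16xx/24xx FIFO holds 32x2 bytes */
			frame = 64;
		count /= frame;			/* DMA frames in this segment */
		frame >>= 1;			/* 16-bit words per frame */

		printf("frames=%u words_per_frame=%u\n", count, frame);
		return 0;
	}
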
+
+/* A scatterlist segment completed */
+static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
+{
+ struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ struct mmc_data *mmcdat = host->data;
+
+ if (unlikely(host->dma_ch < 0)) {
+ dev_err(mmc_dev(host->mmc),
+ "DMA callback while DMA not enabled\n");
+ return;
+ }
+ /* FIXME: We really should do something to _handle_ the errors */
+ if (ch_status & OMAP_DMA_TOUT_IRQ) {
+ dev_err(mmc_dev(host->mmc),"DMA timeout\n");
+ return;
+ }
+ if (ch_status & OMAP_DMA_DROP_IRQ) {
+ dev_err(mmc_dev(host->mmc), "DMA sync error\n");
+ return;
+ }
+ if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
+ return;
+ }
+ mmcdat->bytes_xfered += host->dma_len;
+ host->sg_idx++;
+ if (host->sg_idx < host->sg_len) {
+ mmc_omap_prepare_dma(host, host->data);
+ omap_start_dma(host->dma_ch);
+ } else
+ mmc_omap_dma_done(host, host->data);
+}
+
+static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
+{
+ const char *dev_name;
+ int sync_dev, dma_ch, is_read, r;
+
+ is_read = !(data->flags & MMC_DATA_WRITE);
+ del_timer_sync(&host->dma_timer);
+ if (host->dma_ch >= 0) {
+ if (is_read == host->dma_is_read)
+ return 0;
+ omap_free_dma(host->dma_ch);
+ host->dma_ch = -1;
+ }
+
+ if (is_read) {
+ if (host->id == 1) {
+ sync_dev = OMAP_DMA_MMC_RX;
+ dev_name = "MMC1 read";
+ } else {
+ sync_dev = OMAP_DMA_MMC2_RX;
+ dev_name = "MMC2 read";
+ }
+ } else {
+ if (host->id == 1) {
+ sync_dev = OMAP_DMA_MMC_TX;
+ dev_name = "MMC1 write";
+ } else {
+ sync_dev = OMAP_DMA_MMC2_TX;
+ dev_name = "MMC2 write";
+ }
+ }
+ r = omap_request_dma(sync_dev, dev_name, mmc_omap_dma_cb,
+ host, &dma_ch);
+ if (r != 0) {
+ dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
+ return r;
+ }
+ host->dma_ch = dma_ch;
+ host->dma_is_read = is_read;
+
+ return 0;
+}
+
+static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
+{
+ u16 reg;
+
+ reg = OMAP_MMC_READ(host->base, SDIO);
+ reg &= ~(1 << 5);
+ OMAP_MMC_WRITE(host->base, SDIO, reg);
+ /* Set maximum timeout */
+ OMAP_MMC_WRITE(host->base, CTO, 0xff);
+}
+
+static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
+{
+ int timeout;
+ u16 reg;
+
+ /* Convert ns to clock cycles by assuming 20MHz frequency
+ * 1 cycle at 20MHz = 500 ns
+ */
+ timeout = req->data->timeout_clks + req->data->timeout_ns / 500;
+
+ /* Check if we need to use timeout multiplier register */
+ reg = OMAP_MMC_READ(host->base, SDIO);
+ if (timeout > 0xffff) {
+ reg |= (1 << 5);
+ timeout /= 1024;
+ } else
+ reg &= ~(1 << 5);
+ OMAP_MMC_WRITE(host->base, SDIO, reg);
+ OMAP_MMC_WRITE(host->base, DTO, timeout);
+}
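
A worked example of the timeout conversion in set_data_timeout() above, using the 20 MHz (500 ns per cycle) assumption stated in the comment; the 100 ms request and the standalone wrapper are illustrative only.

	/* Illustrative only: ns -> clock cycles, with the x1024 multiplier path. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int timeout_ns = 100 * 1000 * 1000;	/* e.g. a 100 ms request */
		unsigned int timeout_clks = 0;
		unsigned int timeout = timeout_clks + timeout_ns / 500;
		int use_multiplier = 0;

		if (timeout > 0xffff) {		/* DTO register is 16 bits wide */
			use_multiplier = 1;	/* SDIO bit 5: timeout x 1024 */
			timeout /= 1024;
		}
		printf("DTO=%u multiplier=%d\n", timeout, use_multiplier);
		return 0;
	}
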
+
+static void
+mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
+{
+ struct mmc_data *data = req->data;
+ int i, use_dma, block_size;
+ unsigned sg_len;
+
+ host->data = data;
+ if (data == NULL) {
+ OMAP_MMC_WRITE(host->base, BLEN, 0);
+ OMAP_MMC_WRITE(host->base, NBLK, 0);
+ OMAP_MMC_WRITE(host->base, BUF, 0);
+ host->dma_in_use = 0;
+ set_cmd_timeout(host, req);
+ return;
+ }
+
+
+ block_size = 1 << data->blksz_bits;
+
+ OMAP_MMC_WRITE(host->base, NBLK, data->blocks - 1);
+ OMAP_MMC_WRITE(host->base, BLEN, block_size - 1);
+ set_data_timeout(host, req);
+
+ /* cope with calling layer confusion; it issues "single
+ * block" writes using multi-block scatterlists.
+ */
+ sg_len = (data->blocks == 1) ? 1 : data->sg_len;
+
+ /* Only do DMA for entire blocks */
+ use_dma = host->use_dma;
+ if (use_dma) {
+ for (i = 0; i < sg_len; i++) {
+ if ((data->sg[i].length % block_size) != 0) {
+ use_dma = 0;
+ break;
+ }
+ }
+ }
+
+ host->sg_idx = 0;
+ if (use_dma) {
+ if (mmc_omap_get_dma_channel(host, data) == 0) {
+ enum dma_data_direction dma_data_dir;
+
+ if (data->flags & MMC_DATA_WRITE)
+ dma_data_dir = DMA_TO_DEVICE;
+ else
+ dma_data_dir = DMA_FROM_DEVICE;
+
+ host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
+ sg_len, dma_data_dir);
+ host->total_bytes_left = 0;
+ mmc_omap_prepare_dma(host, req->data);
+ host->brs_received = 0;
+ host->dma_done = 0;
+ host->dma_in_use = 1;
+ } else
+ use_dma = 0;
+ }
+
+ /* Revert to PIO? */
+ if (!use_dma) {
+ OMAP_MMC_WRITE(host->base, BUF, 0x1f1f);
+ host->total_bytes_left = data->blocks * block_size;
+ host->sg_len = sg_len;
+ mmc_omap_sg_to_buf(host);
+ host->dma_in_use = 0;
+ }
+}
+
+static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct mmc_omap_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != NULL);
+
+ host->mrq = req;
+
+ /* only touch fifo AFTER the controller readies it */
+ mmc_omap_prepare_data(host, req);
+ mmc_omap_start_command(host, req->cmd);
+ if (host->dma_in_use)
+ omap_start_dma(host->dma_ch);
+}
+
+static void innovator_fpga_socket_power(int on)
+{
+#if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX)
+
+ if (on) {
+ fpga_write(fpga_read(OMAP1510_FPGA_POWER) | (1 << 3),
+ OMAP1510_FPGA_POWER);
+ } else {
+ fpga_write(fpga_read(OMAP1510_FPGA_POWER) & ~(1 << 3),
+ OMAP1510_FPGA_POWER);
+ }
+#endif
+}
+
+/*
+ * Turn the socket power on/off. Innovator uses FPGA, most boards
+ * probably use GPIO.
+ */
+static void mmc_omap_power(struct mmc_omap_host *host, int on)
+{
+ if (on) {
+ if (machine_is_omap_innovator())
+ innovator_fpga_socket_power(1);
+ else if (machine_is_omap_h2())
+ tps65010_set_gpio_out_value(GPIO3, HIGH);
+ else if (machine_is_omap_h3())
+ /* GPIO 4 of TPS65010 sends SD_EN signal */
+ tps65010_set_gpio_out_value(GPIO4, HIGH);
+ else if (cpu_is_omap24xx()) {
+ u16 reg = OMAP_MMC_READ(host->base, CON);
+ OMAP_MMC_WRITE(host->base, CON, reg | (1 << 11));
+ } else
+ if (host->power_pin >= 0)
+ omap_set_gpio_dataout(host->power_pin, 1);
+ } else {
+ if (machine_is_omap_innovator())
+ innovator_fpga_socket_power(0);
+ else if (machine_is_omap_h2())
+ tps65010_set_gpio_out_value(GPIO3, LOW);
+ else if (machine_is_omap_h3())
+ tps65010_set_gpio_out_value(GPIO4, LOW);
+ else if (cpu_is_omap24xx()) {
+ u16 reg = OMAP_MMC_READ(host->base, CON);
+ OMAP_MMC_WRITE(host->base, CON, reg & ~(1 << 11));
+ } else
+ if (host->power_pin >= 0)
+ omap_set_gpio_dataout(host->power_pin, 0);
+ }
+}
+
+static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mmc_omap_host *host = mmc_priv(mmc);
+ int dsor;
+ int realclock, i;
+
+ realclock = ios->clock;
+
+ if (ios->clock == 0)
+ dsor = 0;
+ else {
+ int func_clk_rate = clk_get_rate(host->fclk);
+
+ dsor = func_clk_rate / realclock;
+ if (dsor < 1)
+ dsor = 1;
+
+ if (func_clk_rate / dsor > realclock)
+ dsor++;
+
+ if (dsor > 250)
+ dsor = 250;
+ dsor++;
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ dsor |= 1 << 15;
+ }
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ mmc_omap_power(host, 0);
+ break;
+ case MMC_POWER_UP:
+ case MMC_POWER_ON:
+ mmc_omap_power(host, 1);
+ dsor |= 1<<11;
+ break;
+ }
+
+ host->bus_mode = ios->bus_mode;
+ host->hw_bus_mode = host->bus_mode;
+
+ clk_enable(host->fclk);
+
+ /* On insanely high arm_per frequencies something sometimes
+ * goes somehow out of sync, and the POW bit is not being set,
+ * which results in the while loop below getting stuck.
+ * Writing to the CON register twice seems to do the trick. */
+ for (i = 0; i < 2; i++)
+ OMAP_MMC_WRITE(host->base, CON, dsor);
+ if (ios->power_mode == MMC_POWER_UP) {
+ /* Send clock cycles, poll completion */
+ OMAP_MMC_WRITE(host->base, IE, 0);
+ OMAP_MMC_WRITE(host->base, STAT, 0xffff);
+ OMAP_MMC_WRITE(host->base, CMD, 1<<7);
+ while (0 == (OMAP_MMC_READ(host->base, STAT) & 1));
+ OMAP_MMC_WRITE(host->base, STAT, 1);
+ }
+ clk_disable(host->fclk);
+}
+
+static int mmc_omap_get_ro(struct mmc_host *mmc)
+{
+ struct mmc_omap_host *host = mmc_priv(mmc);
+
+ return host->wp_pin && omap_get_gpio_datain(host->wp_pin);
+}
+
+static struct mmc_host_ops mmc_omap_ops = {
+ .request = mmc_omap_request,
+ .set_ios = mmc_omap_set_ios,
+ .get_ro = mmc_omap_get_ro,
+};
+
+static int __init mmc_omap_probe(struct platform_device *pdev)
+{
+ struct omap_mmc_conf *minfo = pdev->dev.platform_data;
+ struct mmc_host *mmc;
+ struct mmc_omap_host *host = NULL;
+ int ret = 0;
+
+ if (!platform_get_resource(pdev, IORESOURCE_MEM, 0) ||
+ platform_get_irq(pdev, 0) < 0) {
+ dev_err(&pdev->dev, "mmc_omap_probe: invalid resource type\n");
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(pdev->resource[0].start,
+ pdev->resource[0].end - pdev->resource[0].start + 1,
+ pdev->name)) {
+ dev_dbg(&pdev->dev, "request_mem_region failed\n");
+ return -EBUSY;
+ }
+
+ mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ spin_lock_init(&host->dma_lock);
+ init_timer(&host->dma_timer);
+ host->dma_timer.function = mmc_omap_dma_timer;
+ host->dma_timer.data = (unsigned long) host;
+
+ host->id = pdev->id;
+
+ if (cpu_is_omap24xx()) {
+ host->iclk = clk_get(&pdev->dev, "mmc_ick");
+ if (IS_ERR(host->iclk))
+ goto out;
+ clk_enable(host->iclk);
+ }
+
+ if (!cpu_is_omap24xx())
+ host->fclk = clk_get(&pdev->dev, "mmc_ck");
+ else
+ host->fclk = clk_get(&pdev->dev, "mmc_fck");
+
+ if (IS_ERR(host->fclk)) {
+ ret = PTR_ERR(host->fclk);
+ goto out;
+ }
+
+ /* REVISIT:
+ * Also, use minfo->cover to decide how to manage
+ * the card detect sensing.
+ */
+ host->power_pin = minfo->power_pin;
+ host->switch_pin = minfo->switch_pin;
+ host->wp_pin = minfo->wp_pin;
+ host->use_dma = 1;
+ host->dma_ch = -1;
+
+ host->irq = pdev->resource[1].start;
+ host->base = ioremap(pdev->resource[0].start, SZ_4K);
+ if (!host->base) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (minfo->wire4)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ mmc->ops = &mmc_omap_ops;
+ mmc->f_min = 400000;
+ mmc->f_max = 24000000;
+ mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
+
+ /* Use scatterlist DMA to reduce per-transfer costs.
+ * NOTE max_seg_size assumption that small blocks aren't
+ * normally used (except e.g. for reading SD registers).
+ */
+ mmc->max_phys_segs = 32;
+ mmc->max_hw_segs = 32;
+ mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */
+ mmc->max_seg_size = mmc->max_sectors * 512;
+
+ if (host->power_pin >= 0) {
+ if ((ret = omap_request_gpio(host->power_pin)) != 0) {
+ dev_err(mmc_dev(host->mmc),
+ "Unable to get GPIO pin for MMC power\n");
+ goto out;
+ }
+ omap_set_gpio_direction(host->power_pin, 0);
+ }
+
+ ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
+ if (ret)
+ goto out;
+
+ host->dev = &pdev->dev;
+ platform_set_drvdata(pdev, host);
+
+ mmc_add_host(mmc);
+
+ if (host->switch_pin >= 0) {
+ INIT_WORK(&host->switch_work, mmc_omap_switch_handler, host);
+ init_timer(&host->switch_timer);
+ host->switch_timer.function = mmc_omap_switch_timer;
+ host->switch_timer.data = (unsigned long) host;
+ if (omap_request_gpio(host->switch_pin) != 0) {
+ dev_warn(mmc_dev(host->mmc), "Unable to get GPIO pin for MMC cover switch\n");
+ host->switch_pin = -1;
+ goto no_switch;
+ }
+
+ omap_set_gpio_direction(host->switch_pin, 1);
+ ret = request_irq(OMAP_GPIO_IRQ(host->switch_pin),
+ mmc_omap_switch_irq, SA_TRIGGER_RISING, DRIVER_NAME, host);
+ if (ret) {
+ dev_warn(mmc_dev(host->mmc), "Unable to get IRQ for MMC cover switch\n");
+ omap_free_gpio(host->switch_pin);
+ host->switch_pin = -1;
+ goto no_switch;
+ }
+ ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
+ if (ret == 0) {
+ ret = device_create_file(&pdev->dev, &dev_attr_enable_poll);
+ if (ret != 0)
+ device_remove_file(&pdev->dev, &dev_attr_cover_switch);
+ }
+ if (ret) {
+ dev_warn(mmc_dev(host->mmc), "Unable to create sysfs attributes\n");
+ free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
+ omap_free_gpio(host->switch_pin);
+ host->switch_pin = -1;
+ goto no_switch;
+ }
+ if (mmc_omap_enable_poll && mmc_omap_cover_is_open(host))
+ schedule_work(&host->switch_work);
+ }
+
+no_switch:
+ return 0;
+
+out:
+ /* FIXME: Free other resources too. */
+ if (host) {
+ if (host->iclk && !IS_ERR(host->iclk))
+ clk_put(host->iclk);
+ if (host->fclk && !IS_ERR(host->fclk))
+ clk_put(host->fclk);
+ mmc_free_host(host->mmc);
+ }
+ return ret;
+}
+
+static int mmc_omap_remove(struct platform_device *pdev)
+{
+ struct mmc_omap_host *host = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (host) {
+ mmc_remove_host(host->mmc);
+ free_irq(host->irq, host);
+
+ if (host->power_pin >= 0)
+ omap_free_gpio(host->power_pin);
+ if (host->switch_pin >= 0) {
+ device_remove_file(&pdev->dev, &dev_attr_enable_poll);
+ device_remove_file(&pdev->dev, &dev_attr_cover_switch);
+ free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
+ omap_free_gpio(host->switch_pin);
+ host->switch_pin = -1;
+ del_timer_sync(&host->switch_timer);
+ flush_scheduled_work();
+ }
+ if (host->iclk && !IS_ERR(host->iclk))
+ clk_put(host->iclk);
+ if (host->fclk && !IS_ERR(host->fclk))
+ clk_put(host->fclk);
+ mmc_free_host(host->mmc);
+ }
+
+ release_mem_region(pdev->resource[0].start,
+ pdev->resource[0].end - pdev->resource[0].start + 1);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ int ret = 0;
+ struct mmc_omap_host *host = platform_get_drvdata(pdev);
+
+ if (host && host->suspended)
+ return 0;
+
+ if (host) {
+ ret = mmc_suspend_host(host->mmc, mesg);
+ if (ret == 0)
+ host->suspended = 1;
+ }
+ return ret;
+}
+
+static int mmc_omap_resume(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct mmc_omap_host *host = platform_get_drvdata(pdev);
+
+ if (host && !host->suspended)
+ return 0;
+
+ if (host) {
+ ret = mmc_resume_host(host->mmc);
+ if (ret == 0)
+ host->suspended = 0;
+ }
+
+ return ret;
+}
+#else
+#define mmc_omap_suspend NULL
+#define mmc_omap_resume NULL
+#endif
+
+static struct platform_driver mmc_omap_driver = {
+ .probe = mmc_omap_probe,
+ .remove = mmc_omap_remove,
+ .suspend = mmc_omap_suspend,
+ .resume = mmc_omap_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init mmc_omap_init(void)
+{
+ return platform_driver_register(&mmc_omap_driver);
+}
+
+static void __exit mmc_omap_exit(void)
+{
+ platform_driver_unregister(&mmc_omap_driver);
+}
+
+module_init(mmc_omap_init);
+module_exit(mmc_omap_exit);
+
+MODULE_DESCRIPTION("OMAP Multimedia Card driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_AUTHOR("Juha Yrjölä");
diff --git a/drivers/mmc/omap.h b/drivers/mmc/omap.h
new file mode 100644
index 0000000000000..c954d355a5e3e
--- /dev/null
+++ b/drivers/mmc/omap.h
@@ -0,0 +1,55 @@
+#ifndef DRIVERS_MEDIA_MMC_OMAP_H
+#define DRIVERS_MEDIA_MMC_OMAP_H
+
+#define OMAP_MMC_REG_CMD 0x00
+#define OMAP_MMC_REG_ARGL 0x04
+#define OMAP_MMC_REG_ARGH 0x08
+#define OMAP_MMC_REG_CON 0x0c
+#define OMAP_MMC_REG_STAT 0x10
+#define OMAP_MMC_REG_IE 0x14
+#define OMAP_MMC_REG_CTO 0x18
+#define OMAP_MMC_REG_DTO 0x1c
+#define OMAP_MMC_REG_DATA 0x20
+#define OMAP_MMC_REG_BLEN 0x24
+#define OMAP_MMC_REG_NBLK 0x28
+#define OMAP_MMC_REG_BUF 0x2c
+#define OMAP_MMC_REG_SDIO 0x34
+#define OMAP_MMC_REG_REV 0x3c
+#define OMAP_MMC_REG_RSP0 0x40
+#define OMAP_MMC_REG_RSP1 0x44
+#define OMAP_MMC_REG_RSP2 0x48
+#define OMAP_MMC_REG_RSP3 0x4c
+#define OMAP_MMC_REG_RSP4 0x50
+#define OMAP_MMC_REG_RSP5 0x54
+#define OMAP_MMC_REG_RSP6 0x58
+#define OMAP_MMC_REG_RSP7 0x5c
+#define OMAP_MMC_REG_IOSR 0x60
+#define OMAP_MMC_REG_SYSC 0x64
+#define OMAP_MMC_REG_SYSS 0x68
+
+#define OMAP_MMC_STAT_CARD_ERR (1 << 14)
+#define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
+#define OMAP_MMC_STAT_OCR_BUSY (1 << 12)
+#define OMAP_MMC_STAT_A_EMPTY (1 << 11)
+#define OMAP_MMC_STAT_A_FULL (1 << 10)
+#define OMAP_MMC_STAT_CMD_CRC (1 << 8)
+#define OMAP_MMC_STAT_CMD_TOUT (1 << 7)
+#define OMAP_MMC_STAT_DATA_CRC (1 << 6)
+#define OMAP_MMC_STAT_DATA_TOUT (1 << 5)
+#define OMAP_MMC_STAT_END_BUSY (1 << 4)
+#define OMAP_MMC_STAT_END_OF_DATA (1 << 3)
+#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
+#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
+
+#define OMAP_MMC_READ(base, reg) __raw_readw((base) + OMAP_MMC_REG_##reg)
+#define OMAP_MMC_WRITE(base, reg, val) __raw_writew((val), (base) + OMAP_MMC_REG_##reg)
+
+/*
+ * Command types
+ */
+#define OMAP_MMC_CMDTYPE_BC 0
+#define OMAP_MMC_CMDTYPE_BCR 1
+#define OMAP_MMC_CMDTYPE_AC 2
+#define OMAP_MMC_CMDTYPE_ADTC 3
+
+#endif
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index c32fad1ce51c8..eb9a8826e9b58 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -37,12 +37,6 @@
#include "pxamci.h"
-#ifdef CONFIG_MMC_DEBUG
-#define DBG(x...) printk(KERN_DEBUG x)
-#else
-#define DBG(x...) do { } while (0)
-#endif
-
#define DRIVER_NAME "pxa2xx-mci"
#define NR_SG 1
@@ -206,7 +200,7 @@ static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd,
static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
- DBG("PXAMCI: request done\n");
+ pr_debug("PXAMCI: request done\n");
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
@@ -252,7 +246,7 @@ static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
if ((cmd->resp[0] & 0x80000000) == 0)
cmd->error = MMC_ERR_BADCRC;
} else {
- DBG("ignoring CRC from command %d - *risky*\n",cmd->opcode);
+ pr_debug("ignoring CRC from command %d - *risky*\n",cmd->opcode);
}
#else
cmd->error = MMC_ERR_BADCRC;
@@ -317,12 +311,12 @@ static irqreturn_t pxamci_irq(int irq, void *devid, struct pt_regs *regs)
ireg = readl(host->base + MMC_I_REG);
- DBG("PXAMCI: irq %08x\n", ireg);
+ pr_debug("PXAMCI: irq %08x\n", ireg);
if (ireg) {
unsigned stat = readl(host->base + MMC_STAT);
- DBG("PXAMCI: stat %08x\n", stat);
+ pr_debug("PXAMCI: stat %08x\n", stat);
if (ireg & END_CMD_RES)
handled |= pxamci_cmd_done(host, stat);
@@ -376,9 +370,9 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct pxamci_host *host = mmc_priv(mmc);
- DBG("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
- ios->clock, ios->power_mode, ios->vdd / 100,
- ios->vdd % 100);
+ pr_debug("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
+ ios->clock, ios->power_mode, ios->vdd / 100,
+ ios->vdd % 100);
if (ios->clock) {
unsigned int clk = CLOCKRATE / ios->clock;
@@ -405,8 +399,8 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->cmdat |= CMDAT_INIT;
}
- DBG("pxamci_set_ios: clkrt = %x cmdat = %x\n",
- host->clkrt, host->cmdat);
+ pr_debug("pxamci_set_ios: clkrt = %x cmdat = %x\n",
+ host->clkrt, host->cmdat);
}
static struct mmc_host_ops pxamci_ops = {
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index 8b811d94371c3..bdbfca050029a 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -31,12 +31,8 @@
#define BUGMAIL "<sdhci-devel@list.drzeus.cx>"
-#ifdef CONFIG_MMC_DEBUG
#define DBG(f, x...) \
- printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__,## x)
-#else
-#define DBG(f, x...) do { } while (0)
-#endif
+ pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
static const struct pci_device_id pci_ids[] __devinitdata = {
/* handle any SD host controller */
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 3be397d436fab..511f7b0b31d21 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -44,15 +44,10 @@
#define DRIVER_NAME "wbsd"
#define DRIVER_VERSION "1.5"
-#ifdef CONFIG_MMC_DEBUG
#define DBG(x...) \
- printk(KERN_DEBUG DRIVER_NAME ": " x)
+ pr_debug(DRIVER_NAME ": " x)
#define DBGF(f, x...) \
- printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__ , ##x)
-#else
-#define DBG(x...) do { } while (0)
-#define DBGF(x...) do { } while (0)
-#endif
+ pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
/*
* Device resources
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index 0f6bb2e625d84..a7ec5954caf54 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -200,27 +200,6 @@ config MTD_CFI_AMDSTD
provides support for one of those command sets, used on chips
including the AMD Am29LV320.
-config MTD_CFI_AMDSTD_RETRY
- int "Retry failed commands (erase/program)"
- depends on MTD_CFI_AMDSTD
- default "0"
- help
- Some chips, when attached to a shared bus, don't properly filter
- bus traffic that is destined to other devices. This broken
- behavior causes erase and program sequences to be aborted when
- the sequences are mixed with traffic for other devices.
-
- SST49LF040 (and related) chips are know to be broken.
-
-config MTD_CFI_AMDSTD_RETRY_MAX
- int "Max retries of failed commands (erase/program)"
- depends on MTD_CFI_AMDSTD_RETRY
- default "0"
- help
- If you have an SST49LF040 (or related chip) then this value should
- be set to at least 1. This can also be adjusted at driver load
- time with the retry_cmd_max module parameter.
-
config MTD_CFI_STAA
tristate "Support for ST (Advanced Architecture) flash chips"
depends on MTD_GEN_PROBE
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
index fdb91b6f1d979..57115618c4968 100644
--- a/drivers/mtd/chips/amd_flash.c
+++ b/drivers/mtd/chips/amd_flash.c
@@ -664,7 +664,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
printk("%s: Probing for AMD compatible flash...\n", map->name);
if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
- sizeof(table)/sizeof(table[0])))
+ ARRAY_SIZE(table)))
== -1) {
printk(KERN_WARNING
"%s: Found no AMD compatible device at location zero\n",
@@ -696,7 +696,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
base += (1 << temp.chipshift)) {
int numchips = temp.numchips;
table_pos[numchips] = probe_new_chip(mtd, base, chips,
- &temp, table, sizeof(table)/sizeof(table[0]));
+ &temp, table, ARRAY_SIZE(table));
}
mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index edb306c03c0a1..517ea33e7260f 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -34,6 +34,7 @@
#define MANUFACTURER_MACRONIX 0x00C2
#define MANUFACTURER_NEC 0x0010
#define MANUFACTURER_PMC 0x009D
+#define MANUFACTURER_SHARP 0x00b0
#define MANUFACTURER_SST 0x00BF
#define MANUFACTURER_ST 0x0020
#define MANUFACTURER_TOSHIBA 0x0098
@@ -124,6 +125,9 @@
#define PM49FL004 0x006E
#define PM49FL008 0x006A
+/* Sharp */
+#define LH28F640BF 0x00b0
+
/* ST - www.st.com */
#define M29W800DT 0x00D7
#define M29W800DB 0x005B
@@ -1267,6 +1271,19 @@ static const struct amd_flash_info jedec_table[] = {
.regions = {
ERASEINFO( 0x01000, 256 )
}
+ }, {
+ .mfr_id = MANUFACTURER_SHARP,
+ .dev_id = LH28F640BF,
+ .name = "LH28F640BF",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_4MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x40000,16),
+ }
}, {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39LF512,
@@ -2035,7 +2052,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
DEBUG(MTD_DEBUG_LEVEL3,
"Search for id:(%02x %02x) interleave(%d) type(%d)\n",
cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
- for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
DEBUG( MTD_DEBUG_LEVEL3,
"MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
diff --git a/drivers/mtd/chips/sharp.c b/drivers/mtd/chips/sharp.c
index 36f61a6a766e6..3cc0b23c5865c 100644
--- a/drivers/mtd/chips/sharp.c
+++ b/drivers/mtd/chips/sharp.c
@@ -64,7 +64,7 @@
#undef AUTOUNLOCK /* automatically unlocks blocks before erasing */
-struct mtd_info *sharp_probe(struct map_info *);
+static struct mtd_info *sharp_probe(struct map_info *);
static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd);
@@ -96,7 +96,6 @@ struct sharp_info{
struct flchip chips[1];
};
-struct mtd_info *sharp_probe(struct map_info *map);
static void sharp_destroy(struct mtd_info *mtd);
static struct mtd_chip_driver sharp_chipdrv = {
@@ -107,7 +106,7 @@ static struct mtd_chip_driver sharp_chipdrv = {
};
-struct mtd_info *sharp_probe(struct map_info *map)
+static struct mtd_info *sharp_probe(struct map_info *map)
{
struct mtd_info *mtd = NULL;
struct sharp_info *sharp = NULL;
@@ -581,7 +580,7 @@ static void sharp_destroy(struct mtd_info *mtd)
}
-int __init sharp_probe_init(void)
+static int __init sharp_probe_init(void)
{
printk("MTD Sharp chip driver <ds@lineo.com>\n");
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 6b8bb2e4dcfde..a7a7bfe33879e 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -42,7 +42,8 @@
/* special size referring to all the remaining space in a partition */
-#define SIZE_REMAINING 0xffffffff
+#define SIZE_REMAINING UINT_MAX
+#define OFFSET_CONTINUOUS UINT_MAX
struct cmdline_mtd_partition {
struct cmdline_mtd_partition *next;
@@ -75,7 +76,7 @@ static struct mtd_partition * newpart(char *s,
{
struct mtd_partition *parts;
unsigned long size;
- unsigned long offset = 0;
+ unsigned long offset = OFFSET_CONTINUOUS;
char *name;
int name_len;
unsigned char *extra_mem;
@@ -314,7 +315,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
{
for(i = 0, offset = 0; i < part->num_parts; i++)
{
- if (!part->parts[i].offset)
+ if (part->parts[i].offset == OFFSET_CONTINUOUS)
part->parts[i].offset = offset;
else
offset = part->parts[i].offset;
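The cmdlinepart change stops using offset 0 as the implicit "place this partition right after the previous one" marker, because 0 is also a valid explicit offset; an out-of-band sentinel (OFFSET_CONTINUOUS, i.e. UINT_MAX) now carries that meaning. A sketch of the sentinel pattern, assuming UINT_MAX can never be a real offset (struct part, lay_out and next are illustrative):

#include <linux/kernel.h>	/* UINT_MAX */

#define OFFSET_CONTINUOUS	UINT_MAX	/* "no offset given on the command line" */

struct part {
	unsigned long offset;
	unsigned long size;
};

static void lay_out(struct part *p, int nparts)
{
	unsigned long next = 0;
	int i;

	for (i = 0; i < nparts; i++) {
		if (p[i].offset == OFFSET_CONTINUOUS)
			p[i].offset = next;	/* follow the previous partition */
		else
			next = p[i].offset;	/* explicit offset, which may be 0 */
		next += p[i].size;
	}
}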
diff --git a/drivers/mtd/devices/blkmtd.c b/drivers/mtd/devices/blkmtd.c
index 04f864d238db2..79f2e1f23ebd7 100644
--- a/drivers/mtd/devices/blkmtd.c
+++ b/drivers/mtd/devices/blkmtd.c
@@ -28,8 +28,9 @@
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/init.h>
+#include <linux/mount.h>
#include <linux/mtd/mtd.h>
-
+#include <linux/mutex.h>
#define err(format, arg...) printk(KERN_ERR "blkmtd: " format "\n" , ## arg)
#define info(format, arg...) printk(KERN_INFO "blkmtd: " format "\n" , ## arg)
@@ -46,7 +47,7 @@ struct blkmtd_dev {
struct list_head list;
struct block_device *blkdev;
struct mtd_info mtd_info;
- struct semaphore wrbuf_mutex;
+ struct mutex wrbuf_mutex;
};
@@ -268,7 +269,7 @@ static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
if(end_len)
pagecnt++;
- down(&dev->wrbuf_mutex);
+ mutex_lock(&dev->wrbuf_mutex);
DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
start_len, len, end_len, pagecnt);
@@ -376,7 +377,7 @@ static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
blkmtd_write_out(bio);
DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);
- up(&dev->wrbuf_mutex);
+ mutex_unlock(&dev->wrbuf_mutex);
if(retlen)
*retlen = thislen;
@@ -614,8 +615,6 @@ static struct mtd_erase_region_info *calc_erase_regions(
}
-extern dev_t __init name_to_dev_t(const char *line);
-
static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size)
{
struct block_device *bdev;
@@ -659,7 +658,7 @@ static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size
memset(dev, 0, sizeof(struct blkmtd_dev));
dev->blkdev = bdev;
if(!readonly) {
- init_MUTEX(&dev->wrbuf_mutex);
+ mutex_init(&dev->wrbuf_mutex);
}
dev->mtd_info.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
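blkmtd (like the device drivers below) uses its semaphore purely for mutual exclusion, so it is converted to a struct mutex: init_MUTEX() becomes mutex_init(), and the down()/up() pairs around the critical sections become mutex_lock()/mutex_unlock(). A minimal sketch of the same conversion with an illustrative device structure:

#include <linux/mutex.h>

struct my_dev {
	struct mutex wrbuf_mutex;	/* serialises access to the write buffer */
	/* ... */
};

static void my_dev_init(struct my_dev *dev)
{
	mutex_init(&dev->wrbuf_mutex);		/* was: init_MUTEX() */
}

static int my_dev_write(struct my_dev *dev)
{
	mutex_lock(&dev->wrbuf_mutex);		/* was: down() */
	/* ... fill and flush the write buffer ... */
	mutex_unlock(&dev->wrbuf_mutex);	/* was: up() */
	return 0;
}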
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 7ff403b2a0a05..4160b8334c53e 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/buffer_head.h>
+#include <linux/mutex.h>
#define VERSION "$Revision: 1.30 $"
@@ -31,7 +32,7 @@ struct block2mtd_dev {
struct list_head list;
struct block_device *blkdev;
struct mtd_info mtd;
- struct semaphore write_mutex;
+ struct mutex write_mutex;
};
@@ -134,9 +135,9 @@ static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
int err;
instr->state = MTD_ERASING;
- down(&dev->write_mutex);
+ mutex_lock(&dev->write_mutex);
err = _block2mtd_erase(dev, from, len);
- up(&dev->write_mutex);
+ mutex_unlock(&dev->write_mutex);
if (err) {
ERROR("erase failed err = %d", err);
instr->state = MTD_ERASE_FAILED;
@@ -249,9 +250,9 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
if (to + len > mtd->size)
len = mtd->size - to;
- down(&dev->write_mutex);
+ mutex_lock(&dev->write_mutex);
err = _block2mtd_write(dev, buf, to, len, retlen);
- up(&dev->write_mutex);
+ mutex_unlock(&dev->write_mutex);
if (err > 0)
err = 0;
return err;
@@ -310,7 +311,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
goto devinit_err;
}
- init_MUTEX(&dev->write_mutex);
+ mutex_init(&dev->write_mutex);
/* Setup the MTD structure */
/* make the name contain the block device in */
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index e4345cf744a22..23e7a5c7d2c13 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/mutex.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -605,7 +606,7 @@ static void DoC2k_init(struct mtd_info *mtd)
this->curfloor = -1;
this->curchip = -1;
- init_MUTEX(&this->lock);
+ mutex_init(&this->lock);
/* Ident all the chips present. */
DoC_ScanChips(this, maxchips);
@@ -645,7 +646,7 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
if (from >= this->totlen)
return -EINVAL;
- down(&this->lock);
+ mutex_lock(&this->lock);
*retlen = 0;
while (left) {
@@ -774,7 +775,7 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
buf += len;
}
- up(&this->lock);
+ mutex_unlock(&this->lock);
return ret;
}
@@ -803,7 +804,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
if (to >= this->totlen)
return -EINVAL;
- down(&this->lock);
+ mutex_lock(&this->lock);
*retlen = 0;
while (left) {
@@ -873,7 +874,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
printk(KERN_ERR "Error programming flash\n");
/* Error in programming */
*retlen = 0;
- up(&this->lock);
+ mutex_unlock(&this->lock);
return -EIO;
}
@@ -935,7 +936,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
printk(KERN_ERR "Error programming flash\n");
/* Error in programming */
*retlen = 0;
- up(&this->lock);
+ mutex_unlock(&this->lock);
return -EIO;
}
@@ -956,7 +957,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
if (ret) {
- up(&this->lock);
+ mutex_unlock(&this->lock);
return ret;
}
}
@@ -966,7 +967,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
buf += len;
}
- up(&this->lock);
+ mutex_unlock(&this->lock);
return 0;
}
@@ -975,13 +976,13 @@ static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
u_char *eccbuf, struct nand_oobinfo *oobsel)
{
static char static_buf[512];
- static DECLARE_MUTEX(writev_buf_sem);
+ static DEFINE_MUTEX(writev_buf_mutex);
size_t totretlen = 0;
size_t thisvecofs = 0;
int ret= 0;
- down(&writev_buf_sem);
+ mutex_lock(&writev_buf_mutex);
while(count) {
size_t thislen, thisretlen;
@@ -1024,7 +1025,7 @@ static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
to += thislen;
}
- up(&writev_buf_sem);
+ mutex_unlock(&writev_buf_mutex);
*retlen = totretlen;
return ret;
}
@@ -1037,7 +1038,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
int len256 = 0, ret;
struct Nand *mychip;
- down(&this->lock);
+ mutex_lock(&this->lock);
mychip = &this->chips[ofs >> this->chipshift];
@@ -1083,7 +1084,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
ret = DoC_WaitReady(this);
- up(&this->lock);
+ mutex_unlock(&this->lock);
return ret;
}
@@ -1197,10 +1198,10 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
struct DiskOnChip *this = mtd->priv;
int ret;
- down(&this->lock);
+ mutex_lock(&this->lock);
ret = doc_write_oob_nolock(mtd, ofs, len, retlen, buf);
- up(&this->lock);
+ mutex_unlock(&this->lock);
return ret;
}
@@ -1214,10 +1215,10 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
struct Nand *mychip;
int status;
- down(&this->lock);
+ mutex_lock(&this->lock);
if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) {
- up(&this->lock);
+ mutex_unlock(&this->lock);
return -EINVAL;
}
@@ -1265,7 +1266,7 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
callback:
mtd_erase_callback(instr);
- up(&this->lock);
+ mutex_unlock(&this->lock);
return 0;
}
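For a lock with static storage duration, as in doc_writev_ecc() above, the counterpart of that conversion is DEFINE_MUTEX(), which declares and initialises the mutex in one step where DECLARE_MUTEX() previously created a semaphore. A sketch with illustrative names:

#include <linux/mutex.h>

static DEFINE_MUTEX(bounce_buf_mutex);	/* was: static DECLARE_MUTEX(bounce_buf_sem) */

static int write_via_bounce_buffer(void)
{
	mutex_lock(&bounce_buf_mutex);
	/* ... copy through the shared static buffer ... */
	mutex_unlock(&bounce_buf_mutex);
	return 0;
}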
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 1e876fcb04084..29b0ddaa324e4 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -581,8 +581,6 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen
/***************************************************************************************************/
-#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
-
static struct mtd_info mtd;
static struct mtd_erase_region_info erase_regions[] = {
@@ -640,7 +638,7 @@ int __init lart_flash_init (void)
mtd.flags = MTD_CAP_NORFLASH;
mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
- mtd.numeraseregions = NB_OF (erase_regions);
+ mtd.numeraseregions = ARRAY_SIZE(erase_regions);
mtd.eraseregions = erase_regions;
mtd.erase = flash_erase;
mtd.read = flash_read;
@@ -670,9 +668,9 @@ int __init lart_flash_init (void)
result,mtd.eraseregions[result].numblocks);
#ifdef HAVE_PARTITIONS
- printk ("\npartitions = %d\n",NB_OF (lart_partitions));
+ printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions));
- for (result = 0; result < NB_OF (lart_partitions); result++)
+ for (result = 0; result < ARRAY_SIZE(lart_partitions); result++)
printk (KERN_DEBUG
"\n\n"
"lart_partitions[%d].name = %s\n"
@@ -687,7 +685,7 @@ int __init lart_flash_init (void)
#ifndef HAVE_PARTITIONS
result = add_mtd_device (&mtd);
#else
- result = add_mtd_partitions (&mtd,lart_partitions,NB_OF (lart_partitions));
+ result = add_mtd_partitions (&mtd,lart_partitions, ARRAY_SIZE(lart_partitions));
#endif
return (result);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index d5f24089be717..04e65d5dae000 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -186,7 +186,7 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
struct m25p *flash = mtd_to_m25p(mtd);
u32 addr,len;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
+ DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n",
flash->spi->dev.bus_id, __FUNCTION__, "at",
(u32)instr->addr, instr->len);
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 0ff2e4378244c..485f663493d29 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -308,7 +308,7 @@ static int __init ms02nv_init(void)
break;
}
- for (i = 0; i < (sizeof(ms02nv_addrs) / sizeof(*ms02nv_addrs)); i++)
+ for (i = 0; i < ARRAY_SIZE(ms02nv_addrs); i++)
if (!ms02nv_init_one(ms02nv_addrs[i] << stride))
count++;
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 8a544890173d3..a3b92479719db 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -47,9 +47,6 @@
*/
#define MAX_LOOPS 10000
-extern void INFTL_dumptables(struct INFTLrecord *inftl);
-extern void INFTL_dumpVUchains(struct INFTLrecord *inftl);
-
static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct INFTLrecord *inftl;
@@ -132,7 +129,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
return;
}
#ifdef PSYCHO_DEBUG
- printk(KERN_INFO "INFTL: Found new nftl%c\n", nftl->mbd.devnum + 'a');
+ printk(KERN_INFO "INFTL: Found new inftl%c\n", inftl->mbd.devnum + 'a');
#endif
return;
}
@@ -885,8 +882,6 @@ static struct mtd_blktrans_ops inftl_tr = {
.owner = THIS_MODULE,
};
-extern char inftlmountrev[];
-
static int __init init_inftl(void)
{
printk(KERN_INFO "INFTL: inftlcore.c $Revision: 1.19 $, "
diff --git a/drivers/mtd/maps/alchemy-flash.c b/drivers/mtd/maps/alchemy-flash.c
index a57791a6ce402..b933a2a27b18c 100644
--- a/drivers/mtd/maps/alchemy-flash.c
+++ b/drivers/mtd/maps/alchemy-flash.c
@@ -126,8 +126,6 @@ static struct mtd_partition alchemy_partitions[] = {
}
};
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
static struct mtd_info *mymtd;
int __init alchemy_mtd_init(void)
@@ -154,7 +152,7 @@ int __init alchemy_mtd_init(void)
* Static partition definition selection
*/
parts = alchemy_partitions;
- nb_parts = NB_OF(alchemy_partitions);
+ nb_parts = ARRAY_SIZE(alchemy_partitions);
alchemy_map.size = window_size;
/*
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index 6a8c0415bde87..fd0f0d3187de3 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -86,7 +86,7 @@ struct mtd_partition flagadm_parts[] = {
}
};
-#define PARTITION_COUNT (sizeof(flagadm_parts)/sizeof(struct mtd_partition))
+#define PARTITION_COUNT ARRAY_SIZE(flagadm_parts)
static struct mtd_info *mymtd;
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index 49d90542fc752..652813cd6c2d4 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -57,7 +57,7 @@ static struct mtd_partition partition_info[]= {
}
};
-#define NUM_PARTITIONS (sizeof(partition_info) / sizeof(partition_info[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
#define WINDOW_ADDR 0x10000000
#define WINDOW_SIZE 0x800000
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
index efb221692641e..c299d10b33e6e 100644
--- a/drivers/mtd/maps/dilnetpc.c
+++ b/drivers/mtd/maps/dilnetpc.c
@@ -300,7 +300,7 @@ static struct mtd_partition partition_info[]=
},
};
-#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
static struct mtd_info *mymtd;
static struct mtd_info *lowlvl_parts[NUM_PARTITIONS];
@@ -345,7 +345,7 @@ static struct mtd_partition higlvl_partition_info[]=
},
};
-#define NUM_HIGHLVL_PARTITIONS (sizeof(higlvl_partition_info)/sizeof(partition_info[0]))
+#define NUM_HIGHLVL_PARTITIONS ARRAY_SIZE(higlvl_partition_info)
static int dnp_adnp_probe(void)
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
index b993ac01a9a5f..2bb3c0f0f9704 100644
--- a/drivers/mtd/maps/dmv182.c
+++ b/drivers/mtd/maps/dmv182.c
@@ -99,7 +99,7 @@ static struct mtd_info *this_mtd;
static int __init init_svme182(void)
{
struct mtd_partition *partitions;
- int num_parts = sizeof(svme182_partitions) / sizeof(struct mtd_partition);
+ int num_parts = ARRAY_SIZE(svme182_partitions);
partitions = svme182_partitions;
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 3190948211011..0667101ccbe1d 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -59,7 +59,7 @@ static struct mtd_partition h720x_partitions[] = {
}
};
-#define NUM_PARTITIONS (sizeof(h720x_partitions)/sizeof(h720x_partitions[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions)
static int nr_mtd_parts;
static struct mtd_partition *mtd_parts;
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index 33060a3157221..ed215470158bf 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -76,7 +76,7 @@ static struct mtd_partition partition_info[]={
.size = 0x80000
},
};
-#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
#define WINDOW_SIZE 0x00100000
#define WINDOW_ADDR 0x00200000
@@ -88,7 +88,7 @@ static struct map_info netsc520_map = {
.phys = WINDOW_ADDR,
};
-#define NUM_FLASH_BANKS (sizeof(netsc520_map)/sizeof(struct map_info))
+#define NUM_FLASH_BANKS ARRAY_SIZE(netsc520_map)
static struct mtd_info *mymtd;
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 632eb2aa968f4..54a3102ab19a8 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -128,8 +128,7 @@ static struct mtd_partition nettel_amd_partitions[] = {
}
};
-#define NUM_AMD_PARTITIONS \
- (sizeof(nettel_amd_partitions)/sizeof(nettel_amd_partitions[0]))
+#define NUM_AMD_PARTITIONS ARRAY_SIZE(nettel_amd_partitions)
/****************************************************************************/
diff --git a/drivers/mtd/maps/ocotea.c b/drivers/mtd/maps/ocotea.c
index c223514ca2ebe..a21fcd195ab40 100644
--- a/drivers/mtd/maps/ocotea.c
+++ b/drivers/mtd/maps/ocotea.c
@@ -58,8 +58,6 @@ static struct mtd_partition ocotea_large_partitions[] = {
}
};
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
int __init init_ocotea(void)
{
u8 fpga0_reg;
@@ -97,7 +95,7 @@ int __init init_ocotea(void)
if (flash) {
flash->owner = THIS_MODULE;
add_mtd_partitions(flash, ocotea_small_partitions,
- NB_OF(ocotea_small_partitions));
+ ARRAY_SIZE(ocotea_small_partitions));
} else {
printk("map probe failed for flash\n");
return -ENXIO;
@@ -118,7 +116,7 @@ int __init init_ocotea(void)
if (flash) {
flash->owner = THIS_MODULE;
add_mtd_partitions(flash, ocotea_large_partitions,
- NB_OF(ocotea_large_partitions));
+ ARRAY_SIZE(ocotea_large_partitions));
} else {
printk("map probe failed for flash\n");
return -ENXIO;
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 21822c2edbe40..d2ab1bae9c346 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -334,9 +334,6 @@ mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return 0;
release:
- if (mtd)
- map_destroy(mtd);
-
if (map) {
map->exit(dev, map);
kfree(map);
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index f988c817e196c..d27f4129afd3c 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -54,7 +54,7 @@ static const int debug = 0;
#define MAX_PCMCIA_ADDR 0x4000000
struct pcmciamtd_dev {
- dev_link_t link; /* PCMCIA link */
+ struct pcmcia_device *p_dev;
dev_node_t node; /* device node */
caddr_t win_base; /* ioremapped address of PCMCIA window */
unsigned int win_size; /* size of window */
@@ -111,8 +111,8 @@ static caddr_t remap_window(struct map_info *map, unsigned long to)
memreq_t mrq;
int ret;
- if(!(dev->link.state & DEV_PRESENT)) {
- DEBUG(1, "device removed state = 0x%4.4X", dev->link.state);
+ if (!pcmcia_dev_present(dev->p_dev)) {
+ DEBUG(1, "device removed");
return 0;
}
@@ -122,7 +122,7 @@ static caddr_t remap_window(struct map_info *map, unsigned long to)
dev->offset, mrq.CardOffset);
mrq.Page = 0;
if( (ret = pcmcia_map_mem_page(win, &mrq)) != CS_SUCCESS) {
- cs_error(dev->link.handle, MapMemPage, ret);
+ cs_error(dev->p_dev, MapMemPage, ret);
return NULL;
}
dev->offset = mrq.CardOffset;
@@ -238,7 +238,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
/* read/write{8,16} copy_{from,to} routines with direct access */
-#define DEV_REMOVED(x) (!(*(u_int *)x->map_priv_1 & DEV_PRESENT))
+#define DEV_REMOVED(x) (!(pcmcia_dev_present(((struct pcmciamtd_dev *)map->map_priv_1)->p_dev)))
static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
{
@@ -319,7 +319,7 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
static void pcmciamtd_set_vpp(struct map_info *map, int on)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
- dev_link_t *link = &dev->link;
+ struct pcmcia_device *link = dev->p_dev;
modconf_t mod;
int ret;
@@ -328,9 +328,9 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on)
mod.Vpp1 = mod.Vpp2 = on ? dev->vpp : 0;
DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp);
- ret = pcmcia_modify_configuration(link->handle, &mod);
+ ret = pcmcia_modify_configuration(link, &mod);
if(ret != CS_SUCCESS) {
- cs_error(link->handle, ModifyConfiguration, ret);
+ cs_error(link, ModifyConfiguration, ret);
}
}
@@ -340,7 +340,7 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on)
* still open, this will be postponed until it is closed.
*/
-static void pcmciamtd_release(dev_link_t *link)
+static void pcmciamtd_release(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
@@ -353,12 +353,11 @@ static void pcmciamtd_release(dev_link_t *link)
}
pcmcia_release_window(link->win);
}
- pcmcia_release_configuration(link->handle);
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
}
-static void card_settings(struct pcmciamtd_dev *dev, dev_link_t *link, int *new_name)
+static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name)
{
int rc;
tuple_t tuple;
@@ -371,16 +370,16 @@ static void card_settings(struct pcmciamtd_dev *dev, dev_link_t *link, int *new_
tuple.TupleOffset = 0;
tuple.DesiredTuple = RETURN_FIRST_TUPLE;
- rc = pcmcia_get_first_tuple(link->handle, &tuple);
+ rc = pcmcia_get_first_tuple(link, &tuple);
while(rc == CS_SUCCESS) {
- rc = pcmcia_get_tuple_data(link->handle, &tuple);
+ rc = pcmcia_get_tuple_data(link, &tuple);
if(rc != CS_SUCCESS) {
- cs_error(link->handle, GetTupleData, rc);
+ cs_error(link, GetTupleData, rc);
break;
}
- rc = pcmcia_parse_tuple(link->handle, &tuple, &parse);
+ rc = pcmcia_parse_tuple(link, &tuple, &parse);
if(rc != CS_SUCCESS) {
- cs_error(link->handle, ParseTuple, rc);
+ cs_error(link, ParseTuple, rc);
break;
}
@@ -451,7 +450,7 @@ static void card_settings(struct pcmciamtd_dev *dev, dev_link_t *link, int *new_
DEBUG(2, "Unknown tuple code %d", tuple.TupleCode);
}
- rc = pcmcia_get_next_tuple(link->handle, &tuple);
+ rc = pcmcia_get_next_tuple(link, &tuple);
}
if(!dev->pcmcia_map.size)
dev->pcmcia_map.size = MAX_PCMCIA_ADDR;
@@ -488,7 +487,7 @@ static void card_settings(struct pcmciamtd_dev *dev, dev_link_t *link, int *new_
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void pcmciamtd_config(dev_link_t *link)
+static int pcmciamtd_config(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
struct mtd_info *mtd = NULL;
@@ -504,13 +503,10 @@ static void pcmciamtd_config(dev_link_t *link)
DEBUG(3, "link=0x%p", link);
- /* Configure card */
- link->state |= DEV_CONFIG;
-
DEBUG(2, "Validating CIS");
- ret = pcmcia_validate_cis(link->handle, &cisinfo);
+ ret = pcmcia_validate_cis(link, &cisinfo);
if(ret != CS_SUCCESS) {
- cs_error(link->handle, GetTupleData, ret);
+ cs_error(link, GetTupleData, ret);
} else {
DEBUG(2, "ValidateCIS found %d chains", cisinfo.Chains);
}
@@ -538,7 +534,7 @@ static void pcmciamtd_config(dev_link_t *link)
req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
req.Base = 0;
req.AccessSpeed = mem_speed;
- link->win = (window_handle_t)link->handle;
+ link->win = (window_handle_t)link;
req.Size = (force_size) ? force_size << 20 : MAX_PCMCIA_ADDR;
dev->win_size = 0;
@@ -546,7 +542,7 @@ static void pcmciamtd_config(dev_link_t *link)
int ret;
DEBUG(2, "requesting window with size = %dKiB memspeed = %d",
req.Size >> 10, req.AccessSpeed);
- ret = pcmcia_request_window(&link->handle, &req, &link->win);
+ ret = pcmcia_request_window(&link, &req, &link->win);
DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size);
if(ret) {
req.Size >>= 1;
@@ -562,19 +558,19 @@ static void pcmciamtd_config(dev_link_t *link)
if(!dev->win_size) {
err("Cant allocate memory window");
pcmciamtd_release(link);
- return;
+ return -ENODEV;
}
DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10);
/* Get write protect status */
- CS_CHECK(GetStatus, pcmcia_get_status(link->handle, &status));
+ CS_CHECK(GetStatus, pcmcia_get_status(link, &status));
DEBUG(2, "status value: 0x%x window handle = 0x%8.8lx",
status.CardState, (unsigned long)link->win);
dev->win_base = ioremap(req.Base, req.Size);
if(!dev->win_base) {
err("ioremap(%lu, %u) failed", req.Base, req.Size);
pcmciamtd_release(link);
- return;
+ return -ENODEV;
}
DEBUG(1, "mapped window dev = %p req.base = 0x%lx base = %p size = 0x%x",
dev, req.Base, dev->win_base, req.Size);
@@ -584,17 +580,14 @@ static void pcmciamtd_config(dev_link_t *link)
dev->pcmcia_map.map_priv_2 = (unsigned long)link->win;
DEBUG(2, "Getting configuration");
- CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link->handle, &t));
+ CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &t));
DEBUG(2, "Vcc = %d Vpp1 = %d Vpp2 = %d", t.Vcc, t.Vpp1, t.Vpp2);
dev->vpp = (vpp) ? vpp : t.Vpp1;
link->conf.Attributes = 0;
- link->conf.Vcc = t.Vcc;
if(setvpp == 2) {
- link->conf.Vpp1 = dev->vpp;
- link->conf.Vpp2 = dev->vpp;
+ link->conf.Vpp = dev->vpp;
} else {
- link->conf.Vpp1 = 0;
- link->conf.Vpp2 = 0;
+ link->conf.Vpp = 0;
}
link->conf.IntType = INT_MEMORY;
@@ -606,9 +599,10 @@ static void pcmciamtd_config(dev_link_t *link)
link->conf.ConfigIndex = 0;
link->conf.Present = t.Present;
DEBUG(2, "Setting Configuration");
- ret = pcmcia_request_configuration(link->handle, &link->conf);
+ ret = pcmcia_request_configuration(link, &link->conf);
if(ret != CS_SUCCESS) {
- cs_error(link->handle, RequestConfiguration, ret);
+ cs_error(link, RequestConfiguration, ret);
+ return -ENODEV;
}
if(mem_type == 1) {
@@ -616,7 +610,7 @@ static void pcmciamtd_config(dev_link_t *link)
} else if(mem_type == 2) {
mtd = do_map_probe("map_rom", &dev->pcmcia_map);
} else {
- for(i = 0; i < sizeof(probes) / sizeof(char *); i++) {
+ for(i = 0; i < ARRAY_SIZE(probes); i++) {
DEBUG(1, "Trying %s", probes[i]);
mtd = do_map_probe(probes[i], &dev->pcmcia_map);
if(mtd)
@@ -629,7 +623,7 @@ static void pcmciamtd_config(dev_link_t *link)
if(!mtd) {
DEBUG(1, "Cant find an MTD");
pcmciamtd_release(link);
- return;
+ return -ENODEV;
}
dev->mtd_info = mtd;
@@ -654,7 +648,6 @@ static void pcmciamtd_config(dev_link_t *link)
use the faster non-remapping read/write functions */
if(mtd->size <= dev->win_size) {
DEBUG(1, "Using non remapping memory functions");
- dev->pcmcia_map.map_priv_1 = (unsigned long)&(dev->link.state);
dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
if (dev->pcmcia_map.bankwidth == 1) {
dev->pcmcia_map.read = pcmcia_read8;
@@ -672,19 +665,18 @@ static void pcmciamtd_config(dev_link_t *link)
dev->mtd_info = NULL;
err("Couldnt register MTD device");
pcmciamtd_release(link);
- return;
+ return -ENODEV;
}
snprintf(dev->node.dev_name, sizeof(dev->node.dev_name), "mtd%d", mtd->index);
info("mtd%d: %s", mtd->index, mtd->name);
- link->state &= ~DEV_CONFIG_PENDING;
- link->dev = &dev->node;
- return;
+ link->dev_node = &dev->node;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
err("CS Error, exiting");
pcmciamtd_release(link);
- return;
+ return -ENODEV;
}
@@ -713,21 +705,18 @@ static int pcmciamtd_resume(struct pcmcia_device *dev)
* when the device is released.
*/
-static void pcmciamtd_detach(struct pcmcia_device *p_dev)
+static void pcmciamtd_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
+ struct pcmciamtd_dev *dev = link->priv;
DEBUG(3, "link=0x%p", link);
- if(link->state & DEV_CONFIG) {
- struct pcmciamtd_dev *dev = link->priv;
- if(dev->mtd_info) {
- del_mtd_device(dev->mtd_info);
- info("mtd%d: Removed", dev->mtd_info->index);
- }
-
- pcmciamtd_release(link);
+ if(dev->mtd_info) {
+ del_mtd_device(dev->mtd_info);
+ info("mtd%d: Removed", dev->mtd_info->index);
}
+
+ pcmciamtd_release(link);
}
@@ -736,10 +725,9 @@ static void pcmciamtd_detach(struct pcmcia_device *p_dev)
* with Card Services.
*/
-static int pcmciamtd_attach(struct pcmcia_device *p_dev)
+static int pcmciamtd_probe(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev;
- dev_link_t *link;
/* Create new memory card device */
dev = kmalloc(sizeof(*dev), GFP_KERNEL);
@@ -747,20 +735,13 @@ static int pcmciamtd_attach(struct pcmcia_device *p_dev)
DEBUG(1, "dev=0x%p", dev);
memset(dev, 0, sizeof(*dev));
- link = &dev->link;
+ dev->p_dev = link;
link->priv = dev;
link->conf.Attributes = 0;
link->conf.IntType = INT_MEMORY;
- link->next = NULL;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- pcmciamtd_config(link);
-
- return 0;
+ return pcmciamtd_config(link);
}
static struct pcmcia_device_id pcmciamtd_ids[] = {
@@ -794,7 +775,7 @@ static struct pcmcia_driver pcmciamtd_driver = {
.drv = {
.name = "pcmciamtd"
},
- .probe = pcmciamtd_attach,
+ .probe = pcmciamtd_probe,
.remove = pcmciamtd_detach,
.owner = THIS_MODULE,
.id_table = pcmciamtd_ids,
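The pcmciamtd rework follows the then-current PCMCIA driver model: the embedded dev_link_t and its DEV_PRESENT/DEV_CONFIG state bits go away, the driver keeps a struct pcmcia_device pointer instead, the probe callback returns an int, and teardown goes through pcmcia_disable_device() while liveness checks use pcmcia_dev_present(). A skeletal probe/remove pair in that shape; everything except the pcmcia_* calls and types visible in the hunks above (my_pcmcia_dev, my_probe, my_remove, the include list) is illustrative boilerplate:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

struct my_pcmcia_dev {
	struct pcmcia_device *p_dev;	/* replaces the embedded dev_link_t */
	/* ... driver state ... */
};

static int my_probe(struct pcmcia_device *link)
{
	struct my_pcmcia_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->p_dev = link;
	link->priv = dev;

	/* configuration happens here; errors are now reported directly */
	return 0;
}

static void my_remove(struct pcmcia_device *link)
{
	struct my_pcmcia_dev *dev = link->priv;

	pcmcia_disable_device(link);	/* replaces release_configuration + state bits */
	kfree(dev);
}

static int my_card_gone(struct my_pcmcia_dev *dev)
{
	return !pcmcia_dev_present(dev->p_dev);	/* was: !(link.state & DEV_PRESENT) */
}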
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c
index 5b76ed8861859..50b14033613f6 100644
--- a/drivers/mtd/maps/redwood.c
+++ b/drivers/mtd/maps/redwood.c
@@ -121,8 +121,7 @@ struct map_info redwood_flash_map = {
};
-#define NUM_REDWOOD_FLASH_PARTITIONS \
- (sizeof(redwood_flash_partitions)/sizeof(redwood_flash_partitions[0]))
+#define NUM_REDWOOD_FLASH_PARTITIONS ARRAY_SIZE(redwood_flash_partitions)
static struct mtd_info *redwood_mtd;
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
index 225cdd9ba5b2f..350286dc1d2ee 100644
--- a/drivers/mtd/maps/sbc8240.c
+++ b/drivers/mtd/maps/sbc8240.c
@@ -66,7 +66,7 @@ static struct map_info sbc8240_map[2] = {
}
};
-#define NUM_FLASH_BANKS (sizeof(sbc8240_map) / sizeof(struct map_info))
+#define NUM_FLASH_BANKS ARRAY_SIZE(sbc8240_map)
/*
* The following defines the partition layout of SBC8240 boards.
@@ -125,8 +125,6 @@ static struct mtd_partition sbc8240_fs_partitions [] = {
}
};
-#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
-
/* trivial struct to describe partition information */
struct mtd_part_def
{
@@ -190,10 +188,10 @@ int __init init_sbc8240_mtd (void)
#ifdef CONFIG_MTD_PARTITIONS
sbc8240_part_banks[0].mtd_part = sbc8240_uboot_partitions;
sbc8240_part_banks[0].type = "static image";
- sbc8240_part_banks[0].nums = NB_OF(sbc8240_uboot_partitions);
+ sbc8240_part_banks[0].nums = ARRAY_SIZE(sbc8240_uboot_partitions);
sbc8240_part_banks[1].mtd_part = sbc8240_fs_partitions;
sbc8240_part_banks[1].type = "static file system";
- sbc8240_part_banks[1].nums = NB_OF(sbc8240_fs_partitions);
+ sbc8240_part_banks[1].nums = ARRAY_SIZE(sbc8240_fs_partitions);
for (i = 0; i < NUM_FLASH_BANKS; i++) {
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index ed92afadd8a91..e8c130e1efd32 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -107,7 +107,7 @@ static struct map_info sc520cdp_map[] = {
},
};
-#define NUM_FLASH_BANKS (sizeof(sc520cdp_map)/sizeof(struct map_info))
+#define NUM_FLASH_BANKS ARRAY_SIZE(sc520cdp_map)
static struct mtd_info *mymtd[NUM_FLASH_BANKS];
static struct mtd_info *merged_mtd;
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index 2c91dff8bb60c..28b8a571a91a4 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -70,7 +70,7 @@ static struct mtd_partition partition_info[] = {
.size = 0x80000
},
};
-#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
#endif
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
index 999f4bb3d845c..12fe53c0d2fc1 100644
--- a/drivers/mtd/maps/sharpsl-flash.c
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -49,8 +49,6 @@ static struct mtd_partition sharpsl_partitions[1] = {
}
};
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
int __init init_sharpsl(void)
{
struct mtd_partition *parts;
@@ -92,7 +90,7 @@ int __init init_sharpsl(void)
}
parts = sharpsl_partitions;
- nb_parts = NB_OF(sharpsl_partitions);
+ nb_parts = ARRAY_SIZE(sharpsl_partitions);
printk(KERN_NOTICE "Using %s partision definition\n", part_type);
add_mtd_partitions(mymtd, parts, nb_parts);
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index 4b372bcb17f1c..a7422c200567d 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -64,7 +64,7 @@ static struct mtd_partition ts5500_partitions[] = {
}
};
-#define NUM_PARTITIONS (sizeof(ts5500_partitions)/sizeof(struct mtd_partition))
+#define NUM_PARTITIONS ARRAY_SIZE(ts5500_partitions)
static struct mtd_info *mymtd;
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 79d92808b766f..f7264dc2ac9bc 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -37,7 +37,7 @@ struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};
-#define NUM_PARTITIONS (sizeof(uclinux_romfs) / sizeof(uclinux_romfs[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(uclinux_romfs)
/****************************************************************************/
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index e0063941c0df3..b3e4873954355 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -182,7 +182,7 @@ int __init init_vmax301(void)
}
}
- if (!vmax_mtd[1] && !vmax_mtd[2]) {
+ if (!vmax_mtd[0] && !vmax_mtd[1]) {
iounmap((void *)iomapadr);
return -ENXIO;
}
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 840dd66ce2dc6..458d3c8ae1eee 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -19,12 +19,12 @@
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
static LIST_HEAD(blktrans_majors);
-extern struct semaphore mtd_table_mutex;
+extern struct mutex mtd_table_mutex;
extern struct mtd_info *mtd_table[];
struct mtd_blkcore_priv {
@@ -122,9 +122,9 @@ static int mtd_blktrans_thread(void *arg)
spin_unlock_irq(rq->queue_lock);
- down(&dev->sem);
+ mutex_lock(&dev->lock);
res = do_blktrans_request(tr, dev, req);
- up(&dev->sem);
+ mutex_unlock(&dev->lock);
spin_lock_irq(rq->queue_lock);
@@ -235,8 +235,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
int last_devnum = -1;
struct gendisk *gd;
- if (!down_trylock(&mtd_table_mutex)) {
- up(&mtd_table_mutex);
+ if (!!mutex_trylock(&mtd_table_mutex)) {
+ mutex_unlock(&mtd_table_mutex);
BUG();
}
@@ -267,7 +267,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
return -EBUSY;
}
- init_MUTEX(&new->sem);
+ mutex_init(&new->lock);
list_add_tail(&new->list, &tr->devs);
added:
if (!tr->writesect)
@@ -313,8 +313,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
- if (!down_trylock(&mtd_table_mutex)) {
- up(&mtd_table_mutex);
+ if (!!mutex_trylock(&mtd_table_mutex)) {
+ mutex_unlock(&mtd_table_mutex);
BUG();
}
@@ -378,14 +378,14 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv));
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
ret = register_blkdev(tr->major, tr->name);
if (ret) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
kfree(tr->blkcore_priv);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return ret;
}
spin_lock_init(&tr->blkcore_priv->queue_lock);
@@ -396,7 +396,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (!tr->blkcore_priv->rq) {
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return -ENOMEM;
}
@@ -407,7 +407,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return ret;
}
@@ -419,7 +419,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
tr->add_mtd(tr, mtd_table[i]);
}
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return 0;
}
@@ -428,7 +428,7 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
struct list_head *this, *next;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
/* Clean up the kernel thread */
tr->blkcore_priv->exiting = 1;
@@ -446,7 +446,7 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
kfree(tr->blkcore_priv);
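One detail of the mtd_blkdevs conversion is that the trylock return conventions are opposite: down_trylock() returns 0 when it acquires the semaphore, whereas mutex_trylock() returns 1 when it acquires the mutex (the extra "!!" above is just boolean normalisation). The "caller must already hold mtd_table_mutex" assertion therefore keeps its meaning with an un-negated test. A sketch of that assertion, with table_mutex and assert_table_locked as illustrative names:

#include <linux/kernel.h>	/* BUG() */
#include <linux/mutex.h>

static DEFINE_MUTEX(table_mutex);

/* Sanity check: the caller is supposed to hold table_mutex already.
 * If mutex_trylock() succeeds, the lock was free, so the rule was
 * broken; drop it again and complain. */
static void assert_table_locked(void)
{
	if (mutex_trylock(&table_mutex)) {
		mutex_unlock(&table_mutex);
		BUG();
	}
}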
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index e84756644fd1e..2cef280e388c7 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -19,11 +19,13 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
+#include <linux/mutex.h>
+
static struct mtdblk_dev {
struct mtd_info *mtd;
int count;
- struct semaphore cache_sem;
+ struct mutex cache_mutex;
unsigned char *cache_data;
unsigned long cache_offset;
unsigned int cache_size;
@@ -284,7 +286,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
mtdblk->count = 1;
mtdblk->mtd = mtd;
- init_MUTEX (&mtdblk->cache_sem);
+ mutex_init(&mtdblk->cache_mutex);
mtdblk->cache_state = STATE_EMPTY;
if ((mtdblk->mtd->flags & MTD_CAP_RAM) != MTD_CAP_RAM &&
mtdblk->mtd->erasesize) {
@@ -306,9 +308,9 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
- down(&mtdblk->cache_sem);
+ mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
- up(&mtdblk->cache_sem);
+ mutex_unlock(&mtdblk->cache_mutex);
if (!--mtdblk->count) {
/* It was the last usage. Free the device */
@@ -327,9 +329,9 @@ static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
- down(&mtdblk->cache_sem);
+ mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
- up(&mtdblk->cache_sem);
+ mutex_unlock(&mtdblk->cache_mutex);
if (mtdblk->mtd->sync)
mtdblk->mtd->sync(mtdblk->mtd);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index dade02ab0687a..9905870f56e5e 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -19,15 +19,13 @@
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/mtd/compatmac.h>
-#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
-#endif
#include <linux/mtd/mtd.h>
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
-DECLARE_MUTEX(mtd_table_mutex);
+DEFINE_MUTEX(mtd_table_mutex);
struct mtd_info *mtd_table[MAX_MTD_DEVICES];
EXPORT_SYMBOL_GPL(mtd_table_mutex);
@@ -49,7 +47,7 @@ int add_mtd_device(struct mtd_info *mtd)
{
int i;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
for (i=0; i < MAX_MTD_DEVICES; i++)
if (!mtd_table[i]) {
@@ -67,7 +65,7 @@ int add_mtd_device(struct mtd_info *mtd)
not->add(mtd);
}
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
/* We _know_ we aren't being removed, because
our caller is still holding us here. So none
of this try_ nonsense, and no bitching about it
@@ -76,7 +74,7 @@ int add_mtd_device(struct mtd_info *mtd)
return 0;
}
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return 1;
}
@@ -94,7 +92,7 @@ int del_mtd_device (struct mtd_info *mtd)
{
int ret;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
if (mtd_table[mtd->index] != mtd) {
ret = -ENODEV;
@@ -118,7 +116,7 @@ int del_mtd_device (struct mtd_info *mtd)
ret = 0;
}
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return ret;
}
@@ -135,7 +133,7 @@ void register_mtd_user (struct mtd_notifier *new)
{
int i;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
list_add(&new->list, &mtd_notifiers);
@@ -145,7 +143,7 @@ void register_mtd_user (struct mtd_notifier *new)
if (mtd_table[i])
new->add(mtd_table[i]);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
}
/**
@@ -162,7 +160,7 @@ int unregister_mtd_user (struct mtd_notifier *old)
{
int i;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
module_put(THIS_MODULE);
@@ -171,7 +169,7 @@ int unregister_mtd_user (struct mtd_notifier *old)
old->remove(mtd_table[i]);
list_del(&old->list);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return 0;
}
@@ -193,7 +191,7 @@ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
struct mtd_info *ret = NULL;
int i;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
if (num == -1) {
for (i=0; i< MAX_MTD_DEVICES; i++)
@@ -211,7 +209,7 @@ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
if (ret)
ret->usecount++;
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return ret;
}
@@ -219,9 +217,9 @@ void put_mtd_device(struct mtd_info *mtd)
{
int c;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
c = --mtd->usecount;
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
BUG_ON(c < 0);
module_put(mtd->owner);
@@ -296,10 +294,11 @@ EXPORT_SYMBOL(unregister_mtd_user);
EXPORT_SYMBOL(default_mtd_writev);
EXPORT_SYMBOL(default_mtd_readv);
+#ifdef CONFIG_PROC_FS
+
/*====================================================================*/
/* Support for /proc/mtd */
-#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_mtd;
static inline int mtd_proc_info (char *buf, int i)
@@ -319,7 +318,7 @@ static int mtd_read_proc (char *page, char **start, off_t off, int count,
int len, l, i;
off_t begin = 0;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
len = sprintf(page, "dev: size erasesize name\n");
for (i=0; i< MAX_MTD_DEVICES; i++) {
@@ -337,38 +336,34 @@ static int mtd_read_proc (char *page, char **start, off_t off, int count,
*eof = 1;
done:
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
if (off >= len+begin)
return 0;
*start = page + (off-begin);
return ((count < begin+len-off) ? count : begin+len-off);
}
-#endif /* CONFIG_PROC_FS */
-
/*====================================================================*/
/* Init code */
static int __init init_mtd(void)
{
-#ifdef CONFIG_PROC_FS
if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
proc_mtd->read_proc = mtd_read_proc;
-#endif
return 0;
}
static void __exit cleanup_mtd(void)
{
-#ifdef CONFIG_PROC_FS
if (proc_mtd)
remove_proc_entry( "mtd", NULL);
-#endif
}
module_init(init_mtd);
module_exit(cleanup_mtd);
+#endif /* CONFIG_PROC_FS */
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 1fc4c134d9391..cfe288a6e8535 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -178,17 +178,16 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
Even if you leave this disabled, you can enable BBT writes at module
load time (assuming you build diskonchip as a module) with the module
parameter "inftl_bbt_write=1".
-
- config MTD_NAND_SHARPSL
- bool "Support for NAND Flash on Sharp SL Series (C7xx + others)"
- depends on MTD_NAND && ARCH_PXA
-
- config MTD_NAND_NANDSIM
- bool "Support for NAND Flash Simulator"
- depends on MTD_NAND && MTD_PARTITIONS
+config MTD_NAND_SHARPSL
+ tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
+ depends on MTD_NAND && ARCH_PXA
+
+config MTD_NAND_NANDSIM
+ tristate "Support for NAND Flash Simulator"
+ depends on MTD_NAND && MTD_PARTITIONS
help
The simulator may simulate verious NAND flash chips for the
MTD nand layer.
-
+
endmenu
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 201e1362da147..bde3550910a2e 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -55,8 +55,6 @@ static const struct mtd_partition partition_info[] = {
.size = MTDPART_SIZ_FULL
}
};
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
/**
* au_read_byte - read one byte from the chip
@@ -462,7 +460,7 @@ int __init au1xxx_nand_init (void)
}
/* Register the partitions */
- add_mtd_partitions(au1550_mtd, partition_info, NB_OF(partition_info));
+ add_mtd_partitions(au1550_mtd, partition_info, ARRAY_SIZE(partition_info));
return 0;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 5d222460b42a8..95e96fa1fcebb 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -80,6 +80,7 @@
#include <linux/mtd/compatmac.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
+#include <linux/leds.h>
#include <asm/io.h>
#ifdef CONFIG_MTD_PARTITIONS
@@ -515,6 +516,8 @@ static int nand_block_checkbad (struct mtd_info *mtd, loff_t ofs, int getchip, i
return nand_isbad_bbt (mtd, ofs, allowbbt);
}
+DEFINE_LED_TRIGGER(nand_led_trigger);
+
/*
* Wait for the ready pin, after a command
* The timeout is catched later.
@@ -524,12 +527,14 @@ static void nand_wait_ready(struct mtd_info *mtd)
struct nand_chip *this = mtd->priv;
unsigned long timeo = jiffies + 2;
+ led_trigger_event(nand_led_trigger, LED_FULL);
/* wait until command is processed or timeout occures */
do {
if (this->dev_ready(mtd))
- return;
+ break;
touch_softlockup_watchdog();
} while (time_before(jiffies, timeo));
+ led_trigger_event(nand_led_trigger, LED_OFF);
}
/**
@@ -817,6 +822,8 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *this, int state)
else
timeo += (HZ * 20) / 1000;
+ led_trigger_event(nand_led_trigger, LED_FULL);
+
/* Apply this short delay always to ensure that we do wait tWB in
* any case on any machine. */
ndelay (100);
@@ -840,6 +847,8 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *this, int state)
}
cond_resched();
}
+ led_trigger_event(nand_led_trigger, LED_OFF);
+
status = (int) this->read_byte(mtd);
return status;
}
@@ -2724,6 +2733,21 @@ void nand_release (struct mtd_info *mtd)
EXPORT_SYMBOL_GPL (nand_scan);
EXPORT_SYMBOL_GPL (nand_release);
+
+static int __init nand_base_init(void)
+{
+ led_trigger_register_simple("nand-disk", &nand_led_trigger);
+ return 0;
+}
+
+static void __exit nand_base_exit(void)
+{
+ led_trigger_unregister_simple(nand_led_trigger);
+}
+
+module_init(nand_base_init);
+module_exit(nand_base_exit);
+
MODULE_LICENSE ("GPL");
MODULE_AUTHOR ("Steven J. Hill <sjhill@realitydiluted.com>, Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION ("Generic NAND flash driver code");
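nand_base now drives a "nand-disk" LED trigger: DEFINE_LED_TRIGGER() declares the trigger pointer, led_trigger_register_simple() registers it by name at module init, and led_trigger_event() switches any LED bound to that trigger to LED_FULL while the chip is busy and back to LED_OFF afterwards. A compact sketch of the same API in a standalone module; my_trigger, my_wait_busy and the "my-activity" trigger name are illustrative:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/leds.h>

DEFINE_LED_TRIGGER(my_trigger);		/* static struct led_trigger pointer */

static void my_wait_busy(void)
{
	led_trigger_event(my_trigger, LED_FULL);	/* LED on while waiting */
	/* ... poll or sleep until the hardware is ready ... */
	led_trigger_event(my_trigger, LED_OFF);
}

static int __init my_init(void)
{
	led_trigger_register_simple("my-activity", &my_trigger);
	return 0;
}

static void __exit my_exit(void)
{
	led_trigger_unregister_simple(my_trigger);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");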
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 8815c8dbef2d0..c077d2ec9cddb 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -85,10 +85,6 @@ static int parse_redboot_partitions(struct mtd_info *master,
numslots = (master->erasesize / sizeof(struct fis_image_desc));
for (i = 0; i < numslots; i++) {
- if (buf[i].name[0] == 0xff) {
- i = numslots;
- break;
- }
if (!memcmp(buf[i].name, "FIS directory", 14)) {
/* This is apparently the FIS directory entry for the
* FIS directory itself. The FIS directory size is
@@ -128,7 +124,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
struct fis_list *new_fl, **prev;
if (buf[i].name[0] == 0xff)
- break;
+ continue;
if (!redboot_checksum(&buf[i]))
break;
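The RedBoot parser change treats a slot whose name begins with 0xff as an erased (deleted) FIS entry to skip, not as the end of the directory, so entries that follow a deleted one are still picked up; a failed checksum still stops the scan. An illustrative walk over the directory (count_valid is a made-up helper; struct fis_image_desc and redboot_checksum() are the ones used above):

static int count_valid(struct fis_image_desc *buf, int numslots)
{
	int i, valid = 0;

	for (i = 0; i < numslots; i++) {
		if (buf[i].name[0] == 0xff)
			continue;	/* erased entry: keep scanning */
		if (!redboot_checksum(&buf[i]))
			break;		/* corrupt entry: stop */
		valid++;
	}
	return valid;
}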
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 70f63891b19cf..274b0138d4420 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -788,7 +788,7 @@ struct vortex_private {
int options; /* User-settable misc. driver options. */
unsigned int media_override:4, /* Passed-in media type. */
default_media:4, /* Read from the EEPROM/Wn3_Config. */
- full_duplex:1, force_fd:1, autoselect:1,
+ full_duplex:1, autoselect:1,
bus_master:1, /* Vortex can only do a fragment bus-m. */
full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */
flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */
@@ -1633,12 +1633,6 @@ vortex_set_duplex(struct net_device *dev)
((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
0x100 : 0),
ioaddr + Wn3_MAC_Ctrl);
-
- issue_and_wait(dev, TxReset);
- /*
- * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
- */
- issue_and_wait(dev, RxReset|0x04);
}
static void vortex_check_media(struct net_device *dev, unsigned int init)
@@ -1663,7 +1657,7 @@ vortex_up(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned int config;
- int i;
+ int i, mii_reg1, mii_reg5;
if (VORTEX_PCI(vp)) {
pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
@@ -1723,14 +1717,23 @@ vortex_up(struct net_device *dev)
printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
iowrite32(config, ioaddr + Wn3_Config);
- netif_carrier_off(dev);
if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
EL3WINDOW(4);
+ mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
+ mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
+ vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
+
vortex_check_media(dev, 1);
}
else
vortex_set_duplex(dev);
+ issue_and_wait(dev, TxReset);
+ /*
+ * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
+ */
+ issue_and_wait(dev, RxReset|0x04);
+
iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
@@ -2083,16 +2086,14 @@ vortex_error(struct net_device *dev, int status)
}
if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ if (tx_status & 0x08) vp->xstats.tx_max_collisions++;
iowrite8(0, ioaddr + TxStatus);
if (tx_status & 0x30) { /* txJabber or txUnderrun */
do_tx_reset = 1;
- } else if (tx_status & 0x08) { /* maxCollisions */
- vp->xstats.tx_max_collisions++;
- if (vp->drv_flags & MAX_COLLISION_RESET) {
- do_tx_reset = 1;
- reset_mask = 0x0108; /* Reset interface logic, but not download logic */
- }
- } else { /* Merely re-enable the transmitter. */
+ } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */
+ do_tx_reset = 1;
+ reset_mask = 0x0108; /* Reset interface logic, but not download logic */
+ } else { /* Merely re-enable the transmitter. */
iowrite16(TxEnable, ioaddr + EL3_CMD);
}
}
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ce99845d8266a..066e22b01a941 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -539,8 +539,7 @@ rx_status_loop:
unsigned buflen;
skb = cp->rx_skb[rx_tail].skb;
- if (!skb)
- BUG();
+ BUG_ON(!skb);
desc = &cp->rx_ring[rx_tail];
status = le32_to_cpu(desc->opts1);
@@ -723,8 +722,7 @@ static void cp_tx (struct cp_private *cp)
break;
skb = cp->tx_skb[tx_tail].skb;
- if (!skb)
- BUG();
+ BUG_ON(!skb);
pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
@@ -1550,8 +1548,7 @@ static void cp_get_ethtool_stats (struct net_device *dev,
tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
tmp_stats[i++] = cp->cp_stats.rx_frags;
- if (i != CP_NUM_STATS)
- BUG();
+ BUG_ON(i != CP_NUM_STATS);
pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
}
@@ -1856,8 +1853,7 @@ static void cp_remove_one (struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct cp_private *cp = netdev_priv(dev);
- if (!dev)
- BUG();
+ BUG_ON(!dev);
unregister_netdev(dev);
iounmap(cp->regs);
if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);
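The 8139cp hunks, like the similar ones in arcnet, b44, chelsio, e1000 and eql below, replace open-coded "if (!x) BUG();" tests with BUG_ON(), which evaluates its condition and panics when it is true. A minimal sketch with an illustrative helper:

#include <linux/kernel.h>	/* BUG_ON() */

static void consume_buffer(void *buf)
{
	BUG_ON(buf == NULL);	/* equivalent to: if (buf == NULL) BUG(); */
	/* ... use buf ... */
}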
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e20b849a22e8e..bdaaad8f2123d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2313,13 +2313,11 @@ config S2IO_NAPI
endmenu
-if !UML
source "drivers/net/tokenring/Kconfig"
source "drivers/net/wireless/Kconfig"
source "drivers/net/pcmcia/Kconfig"
-endif
source "drivers/net/wan/Kconfig"
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 64e2caf3083df..fabc0607b0f1d 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -765,8 +765,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
BUGMSG(D_DURING, "in arcnet_interrupt\n");
lp = dev->priv;
- if (!lp)
- BUG();
+ BUG_ON(!lp);
spin_lock(&lp->lock);
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index 43150b2bd13fa..0d45553ff75c8 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -125,11 +125,11 @@ static void __init com90xx_probe(void)
if (!io && !irq && !shmem && !*device && com90xx_skip_probe)
return;
- shmems = kzalloc(((0x10000-0xa0000) / 0x800) * sizeof(unsigned long),
+ shmems = kzalloc(((0x100000-0xa0000) / 0x800) * sizeof(unsigned long),
GFP_KERNEL);
if (!shmems)
return;
- iomem = kzalloc(((0x10000-0xa0000) / 0x800) * sizeof(void __iomem *),
+ iomem = kzalloc(((0x100000-0xa0000) / 0x800) * sizeof(void __iomem *),
GFP_KERNEL);
if (!iomem) {
kfree(shmems);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 15032f2c78179..c4e12b5cbb92c 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -608,8 +608,7 @@ static void b44_tx(struct b44 *bp)
struct ring_info *rp = &bp->tx_buffers[cons];
struct sk_buff *skb = rp->skb;
- if (unlikely(skb == NULL))
- BUG();
+ BUG_ON(skb == NULL);
pci_unmap_single(bp->pdev,
pci_unmap_addr(rp, mapping),
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 30ff8ea1a4026..4391bf4bf573a 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1093,8 +1093,7 @@ static int process_responses(struct adapter *adapter, int budget)
if (likely(e->DataValid)) {
struct freelQ *fl = &sge->freelQ[e->FreelistQid];
- if (unlikely(!e->Sop || !e->Eop))
- BUG();
+ BUG_ON(!e->Sop || !e->Eop);
if (unlikely(e->Offload))
unexpected_offload(adapter, fl);
else
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 49cd096a3c3d0..add8dc4aa7b04 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3308,8 +3308,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
while (poll_dev != &adapter->polling_netdev[i]) {
i++;
- if (unlikely(i == adapter->num_rx_queues))
- BUG();
+ BUG_ON(i == adapter->num_rx_queues);
}
if (likely(adapter->num_tx_queues == 1)) {
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index aa1569182fd62..815436c6170f5 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -203,8 +203,7 @@ static int eql_open(struct net_device *dev)
printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
"your slave devices.\n", dev->name);
- if (!list_empty(&eql->queue.all_slaves))
- BUG();
+ BUG_ON(!list_empty(&eql->queue.all_slaves));
eql->min_slaves = 1;
eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index ceb98fd398afa..52d01027d9e7c 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -235,7 +235,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
pool->free_map[free_index] = index;
pool->skbuff[index] = NULL;
pool->consumer_index--;
@@ -373,7 +373,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
}
@@ -511,7 +511,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->filter_list_dma,
mac_address);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
adapter->buffer_list_dma,
@@ -527,7 +527,7 @@ static int ibmveth_open(struct net_device *netdev)
ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
do {
rc = h_free_logical_lan(adapter->vdev->unit_address);
- } while (H_isLongBusy(rc) || (rc == H_Busy));
+ } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
ibmveth_cleanup(adapter);
return rc;
@@ -556,9 +556,9 @@ static int ibmveth_close(struct net_device *netdev)
do {
lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
- } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
+ } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
- if(lpar_rc != H_Success)
+ if(lpar_rc != H_SUCCESS)
{
ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
lpar_rc);
@@ -693,9 +693,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
desc[4].desc,
desc[5].desc,
correlator);
- } while ((lpar_rc == H_Busy) && (retry_count--));
+ } while ((lpar_rc == H_BUSY) && (retry_count--));
- if(lpar_rc != H_Success && lpar_rc != H_Dropped) {
+ if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
int i;
ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
for(i = 0; i < 6; i++) {
@@ -786,14 +786,14 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
/* we think we are done - reenable interrupts, then check once more to make sure we are done */
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
- ibmveth_assert(lpar_rc == H_Success);
+ ibmveth_assert(lpar_rc == H_SUCCESS);
netif_rx_complete(netdev);
if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed))
{
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
- ibmveth_assert(lpar_rc == H_Success);
+ ibmveth_assert(lpar_rc == H_SUCCESS);
more_work = 1;
goto restart_poll;
}
@@ -813,7 +813,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs
if(netif_rx_schedule_prep(netdev)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
- ibmveth_assert(lpar_rc == H_Success);
+ ibmveth_assert(lpar_rc == H_SUCCESS);
__netif_rx_schedule(netdev);
}
return IRQ_HANDLED;
@@ -835,7 +835,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
IbmVethMcastEnableRecv |
IbmVethMcastDisableFiltering,
0);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
}
} else {
@@ -847,7 +847,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
IbmVethMcastDisableFiltering |
IbmVethMcastClearFilterTable,
0);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
}
/* add the addresses to the filter table */
@@ -858,7 +858,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
}
}
@@ -867,7 +867,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableFiltering,
0);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
}
}
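The ibmveth hunks above only rename the hypervisor return-code constants to their uppercase spellings (H_SUCCESS, H_BUSY, H_DROPPED) and the H_IS_LONG_BUSY() helper; the retry idiom itself is unchanged. A sketch of that idiom, assuming the renamed constants from <asm/hvcall.h> and a hypothetical wrapper function not present in the driver:

static int example_free_lan(struct ibmveth_adapter *adapter)
{
	long rc;

	/* busy and long-busy returns mean "retry"; anything else is final */
	do {
		rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(rc) || rc == H_BUSY);

	return rc == H_SUCCESS ? 0 : -EIO;
}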
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 63d38fbbd04ec..f530686bd09f8 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -695,8 +695,7 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
/*
* We must not be transmitting...
*/
- if (si->txskb)
- BUG();
+ BUG_ON(si->txskb);
netif_stop_queue(dev);
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index d11821dd86edb..ced9fdb8335cc 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -645,9 +645,7 @@ static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
- if (!dev)
- BUG();
-
+ BUG_ON(!dev);
unregister_netdev(dev);
release_region(dev->base_addr, NE_IO_EXTENT);
free_netdev(dev);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index edd1b5306b16e..75b35ad760de3 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -94,7 +94,7 @@ static struct console netconsole = {
static int option_setup(char *opt)
{
configured = !netpoll_parse_options(&np, opt);
- return 0;
+ return 1;
}
__setup("netconsole=", option_setup);
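The netconsole change is a semantic fix rather than a cleanup: an obsolete-style __setup() handler is expected to return 1 when it consumes its option, while returning 0 makes the boot-parameter parser treat "netconsole=..." as unhandled and pass it on to init. A short sketch of the convention with a hypothetical handler:

static int __init example_setup(char *opt)
{
	/* parse opt here; returning 1 marks the option as consumed */
	return 1;
}
__setup("example=", example_setup);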
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 8e9b1a537deea..706aed7d717f5 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -568,8 +568,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
#endif
sg = dev->rx_info.descs + (next_empty * DESC_SIZE);
- if (unlikely(NULL != dev->rx_info.skbs[next_empty]))
- BUG();
+ BUG_ON(NULL != dev->rx_info.skbs[next_empty]);
dev->rx_info.skbs[next_empty] = skb;
dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index ce90becb8bdf3..fab93360f0170 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -204,7 +204,7 @@ enum Window4 { /* Window 4: Xcvr/media bits. */
#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
struct el3_private {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct net_device_stats stats;
u16 advertising, partner; /* NWay media advertisement */
@@ -225,8 +225,8 @@ static char mii_preamble_required = 0;
/* Index of functions. */
-static void tc574_config(dev_link_t *link);
-static void tc574_release(dev_link_t *link);
+static int tc574_config(struct pcmcia_device *link);
+static void tc574_release(struct pcmcia_device *link);
static void mdio_sync(kio_addr_t ioaddr, int bits);
static int mdio_read(kio_addr_t ioaddr, int phy_id, int location);
@@ -256,10 +256,9 @@ static void tc574_detach(struct pcmcia_device *p_dev);
with Card Services.
*/
-static int tc574_attach(struct pcmcia_device *p_dev)
+static int tc574_probe(struct pcmcia_device *link)
{
struct el3_private *lp;
- dev_link_t *link;
struct net_device *dev;
DEBUG(0, "3c574_attach()\n");
@@ -269,8 +268,8 @@ static int tc574_attach(struct pcmcia_device *p_dev)
if (!dev)
return -ENOMEM;
lp = netdev_priv(dev);
- link = &lp->link;
link->priv = dev;
+ lp->p_dev = link;
spin_lock_init(&lp->window_lock);
link->io.NumPorts1 = 32;
@@ -280,7 +279,6 @@ static int tc574_attach(struct pcmcia_device *p_dev)
link->irq.Handler = &el3_interrupt;
link->irq.Instance = dev;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
link->conf.Present = PRESENT_OPTION;
@@ -298,13 +296,7 @@ static int tc574_attach(struct pcmcia_device *p_dev)
dev->watchdog_timeo = TX_TIMEOUT;
#endif
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- tc574_config(link);
-
- return 0;
+ return tc574_config(link);
} /* tc574_attach */
/*
@@ -316,18 +308,16 @@ static int tc574_attach(struct pcmcia_device *p_dev)
*/
-static void tc574_detach(struct pcmcia_device *p_dev)
+static void tc574_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
DEBUG(0, "3c574_detach(0x%p)\n", link);
- if (link->dev)
+ if (link->dev_node)
unregister_netdev(dev);
- if (link->state & DEV_CONFIG)
- tc574_release(link);
+ tc574_release(link);
free_netdev(dev);
} /* tc574_detach */
@@ -343,9 +333,8 @@ static void tc574_detach(struct pcmcia_device *p_dev)
static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
-static void tc574_config(dev_link_t *link)
+static int tc574_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
struct net_device *dev = link->priv;
struct el3_private *lp = netdev_priv(dev);
tuple_t tuple;
@@ -363,30 +352,27 @@ static void tc574_config(dev_link_t *link)
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
tuple.TupleData = (cisdata_t *)buf;
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
-
link->io.IOAddrLines = 16;
for (i = j = 0; j < 0x400; j += 0x20) {
link->io.BasePort1 = j ^ 0x300;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) break;
}
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
goto failed;
}
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
@@ -397,8 +383,8 @@ static void tc574_config(dev_link_t *link)
the hardware address. The future products may include a modem chip
and put the address in the CIS. */
tuple.DesiredTuple = 0x88;
- if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) {
- pcmcia_get_tuple_data(handle, &tuple);
+ if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) {
+ pcmcia_get_tuple_data(link, &tuple);
for (i = 0; i < 3; i++)
phys_addr[i] = htons(buf[i]);
} else {
@@ -412,9 +398,9 @@ static void tc574_config(dev_link_t *link)
}
}
tuple.DesiredTuple = CISTPL_VERS_1;
- if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS &&
- pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS &&
- pcmcia_parse_tuple(handle, &tuple, &parse) == CS_SUCCESS) {
+ if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS &&
+ pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS &&
+ pcmcia_parse_tuple(link, &tuple, &parse) == CS_SUCCESS) {
cardname = parse.version_1.str + parse.version_1.ofs[1];
} else
cardname = "3Com 3c574";
@@ -473,13 +459,12 @@ static void tc574_config(dev_link_t *link)
}
}
- link->state &= ~DEV_CONFIG_PENDING;
- link->dev = &lp->node;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ link->dev_node = &lp->node;
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
- link->dev = NULL;
+ link->dev_node = NULL;
goto failed;
}
@@ -493,13 +478,13 @@ static void tc574_config(dev_link_t *link)
8 << config.u.ram_size, ram_split[config.u.ram_split],
config.u.autoselect ? "autoselect " : "");
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
tc574_release(link);
- return;
+ return -ENODEV;
} /* tc574_config */
@@ -509,44 +494,28 @@ failed:
still open, this will be postponed until it is closed.
*/
-static void tc574_release(dev_link_t *link)
+static void tc574_release(struct pcmcia_device *link)
{
- DEBUG(0, "3c574_release(0x%p)\n", link);
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
}
-static int tc574_suspend(struct pcmcia_device *p_dev)
+static int tc574_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if (link->open)
- netif_device_detach(dev);
- pcmcia_release_configuration(link->handle);
- }
+ if (link->open)
+ netif_device_detach(dev);
return 0;
}
-static int tc574_resume(struct pcmcia_device *p_dev)
+static int tc574_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- if (link->open) {
- tc574_reset(dev);
- netif_device_attach(dev);
- }
+ if (link->open) {
+ tc574_reset(dev);
+ netif_device_attach(dev);
}
return 0;
@@ -757,9 +726,9 @@ static void tc574_reset(struct net_device *dev)
static int el3_open(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
- if (!DEV_OK(link))
+ if (!pcmcia_dev_present(link))
return -ENODEV;
link->open++;
@@ -1203,11 +1172,11 @@ static int el3_close(struct net_device *dev)
{
kio_addr_t ioaddr = dev->base_addr;
struct el3_private *lp = netdev_priv(dev);
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
DEBUG(2, "%s: shutting down ethercard.\n", dev->name);
- if (DEV_OK(link)) {
+ if (pcmcia_dev_present(link)) {
unsigned long flags;
/* Turn off statistics ASAP. We update lp->stats below. */
@@ -1246,7 +1215,7 @@ static struct pcmcia_driver tc574_driver = {
.drv = {
.name = "3c574_cs",
},
- .probe = tc574_attach,
+ .probe = tc574_probe,
.remove = tc574_detach,
.id_table = tc574_ids,
.suspend = tc574_suspend,
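The 3c574_cs conversion above is the template repeated in the PCMCIA drivers that follow: the embedded dev_link_t is replaced by a struct pcmcia_device pointer, probe() calls config() directly and propagates its return value, and the DEV_PRESENT/DEV_CONFIG state flags disappear. A minimal sketch of the converted probe path, using a hypothetical driver-private struct (example_private, p_dev and example_config are placeholders, not names from this patch):

static int example_config(struct pcmcia_device *link);

struct example_private {
	struct pcmcia_device *p_dev;	/* back-pointer replaces the embedded dev_link_t */
	dev_node_t node;
};

static int example_probe(struct pcmcia_device *link)
{
	struct net_device *dev;
	struct example_private *lp;

	dev = alloc_etherdev(sizeof(struct example_private));
	if (!dev)
		return -ENOMEM;

	lp = netdev_priv(dev);
	lp->p_dev = link;
	link->priv = dev;

	return example_config(link);	/* 0 on success, -ENODEV on failure */
}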
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 3dba50849da74..875a0fe251e7b 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -105,7 +105,7 @@ enum RxFilter {
#define TX_TIMEOUT ((400*HZ)/1000)
struct el3_private {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct net_device_stats stats;
/* For transceiver monitoring */
@@ -142,8 +142,8 @@ DRV_NAME ".c " DRV_VERSION " 2001/10/13 00:08:50 (David Hinds)";
/*====================================================================*/
-static void tc589_config(dev_link_t *link);
-static void tc589_release(dev_link_t *link);
+static int tc589_config(struct pcmcia_device *link);
+static void tc589_release(struct pcmcia_device *link);
static u16 read_eeprom(kio_addr_t ioaddr, int index);
static void tc589_reset(struct net_device *dev);
@@ -170,10 +170,9 @@ static void tc589_detach(struct pcmcia_device *p_dev);
======================================================================*/
-static int tc589_attach(struct pcmcia_device *p_dev)
+static int tc589_probe(struct pcmcia_device *link)
{
struct el3_private *lp;
- dev_link_t *link;
struct net_device *dev;
DEBUG(0, "3c589_attach()\n");
@@ -183,8 +182,8 @@ static int tc589_attach(struct pcmcia_device *p_dev)
if (!dev)
return -ENOMEM;
lp = netdev_priv(dev);
- link = &lp->link;
link->priv = dev;
+ lp->p_dev = link;
spin_lock_init(&lp->lock);
link->io.NumPorts1 = 16;
@@ -194,7 +193,6 @@ static int tc589_attach(struct pcmcia_device *p_dev)
link->irq.Handler = &el3_interrupt;
link->irq.Instance = dev;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
link->conf.Present = PRESENT_OPTION;
@@ -213,13 +211,7 @@ static int tc589_attach(struct pcmcia_device *p_dev)
#endif
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- tc589_config(link);
-
- return 0;
+ return tc589_config(link);
} /* tc589_attach */
/*======================================================================
@@ -231,18 +223,16 @@ static int tc589_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void tc589_detach(struct pcmcia_device *p_dev)
+static void tc589_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
DEBUG(0, "3c589_detach(0x%p)\n", link);
- if (link->dev)
+ if (link->dev_node)
unregister_netdev(dev);
- if (link->state & DEV_CONFIG)
- tc589_release(link);
+ tc589_release(link);
free_netdev(dev);
} /* tc589_detach */
@@ -258,9 +248,8 @@ static void tc589_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void tc589_config(dev_link_t *link)
+static int tc589_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
struct net_device *dev = link->priv;
struct el3_private *lp = netdev_priv(dev);
tuple_t tuple;
@@ -275,43 +264,40 @@ static void tc589_config(dev_link_t *link)
phys_addr = (u16 *)dev->dev_addr;
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
tuple.TupleData = (cisdata_t *)buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
/* Is this a 3c562? */
tuple.DesiredTuple = CISTPL_MANFID;
tuple.Attributes = TUPLE_RETURN_COMMON;
- if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) &&
- (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS)) {
+ if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
+ (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS)) {
if (le16_to_cpu(buf[0]) != MANFID_3COM)
printk(KERN_INFO "3c589_cs: hmmm, is this really a "
"3Com card??\n");
multi = (le16_to_cpu(buf[1]) == PRODID_3COM_3C562);
}
-
- /* Configure card */
- link->state |= DEV_CONFIG;
/* For the 3c562, the base address must be xx00-xx7f */
link->io.IOAddrLines = 16;
for (i = j = 0; j < 0x400; j += 0x10) {
if (multi && (j & 0x80)) continue;
link->io.BasePort1 = j ^ 0x300;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) break;
}
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
goto failed;
}
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
@@ -321,8 +307,8 @@ static void tc589_config(dev_link_t *link)
/* The 3c589 has an extra EEPROM for configuration info, including
the hardware address. The 3c562 puts the address in the CIS. */
tuple.DesiredTuple = 0x88;
- if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) {
- pcmcia_get_tuple_data(handle, &tuple);
+ if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) {
+ pcmcia_get_tuple_data(link, &tuple);
for (i = 0; i < 3; i++)
phys_addr[i] = htons(buf[i]);
} else {
@@ -346,13 +332,12 @@ static void tc589_config(dev_link_t *link)
else
printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
- link->dev = &lp->node;
- link->state &= ~DEV_CONFIG_PENDING;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ link->dev_node = &lp->node;
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
if (register_netdev(dev) != 0) {
printk(KERN_ERR "3c589_cs: register_netdev() failed\n");
- link->dev = NULL;
+ link->dev_node = NULL;
goto failed;
}
@@ -366,14 +351,13 @@ static void tc589_config(dev_link_t *link)
printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n",
(fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
if_names[dev->if_port]);
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
tc589_release(link);
- return;
-
+ return -ENODEV;
} /* tc589_config */
/*======================================================================
@@ -384,44 +368,28 @@ failed:
======================================================================*/
-static void tc589_release(dev_link_t *link)
+static void tc589_release(struct pcmcia_device *link)
{
- DEBUG(0, "3c589_release(0x%p)\n", link);
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
}
-static int tc589_suspend(struct pcmcia_device *p_dev)
+static int tc589_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if (link->open)
- netif_device_detach(dev);
- pcmcia_release_configuration(link->handle);
- }
+ if (link->open)
+ netif_device_detach(dev);
return 0;
}
-static int tc589_resume(struct pcmcia_device *p_dev)
+static int tc589_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- if (link->open) {
- tc589_reset(dev);
- netif_device_attach(dev);
- }
+ if (link->open) {
+ tc589_reset(dev);
+ netif_device_attach(dev);
}
return 0;
@@ -587,9 +555,9 @@ static int el3_config(struct net_device *dev, struct ifmap *map)
static int el3_open(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
- if (!DEV_OK(link))
+ if (!pcmcia_dev_present(link))
return -ENODEV;
link->open++;
@@ -848,9 +816,9 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
unsigned long flags;
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
- if (DEV_OK(link)) {
+ if (pcmcia_dev_present(link)) {
spin_lock_irqsave(&lp->lock, flags);
update_stats(dev);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -950,11 +918,11 @@ static int el3_rx(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
kio_addr_t ioaddr = dev->base_addr;
u16 opts = SetRxFilter | RxStation | RxBroadcast;
- if (!(DEV_OK(link))) return;
+ if (!pcmcia_dev_present(link)) return;
if (dev->flags & IFF_PROMISC)
opts |= RxMulticast | RxProm;
else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
@@ -965,12 +933,12 @@ static void set_multicast_list(struct net_device *dev)
static int el3_close(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
kio_addr_t ioaddr = dev->base_addr;
DEBUG(1, "%s: shutting down ethercard.\n", dev->name);
- if (DEV_OK(link)) {
+ if (pcmcia_dev_present(link)) {
/* Turn off statistics ASAP. We update lp->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
@@ -1020,7 +988,7 @@ static struct pcmcia_driver tc589_driver = {
.drv = {
.name = "3c589_cs",
},
- .probe = tc589_attach,
+ .probe = tc589_probe,
.remove = tc589_detach,
.id_table = tc589_ids,
.suspend = tc589_suspend,
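The suspend/resume simplification in 3c589_cs (and in the drivers below) follows from the same API change: the PCMCIA core now releases and restores the socket configuration around the callbacks, so the driver no longer tracks DEV_SUSPEND/DEV_CONFIG or calls pcmcia_release_configuration()/pcmcia_request_configuration() itself. A sketch of the reduced callbacks under that assumption (example_reset() stands in for the driver's own hardware re-init):

static int example_suspend(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	if (link->open)
		netif_device_detach(dev);
	return 0;
}

static int example_resume(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	if (link->open) {
		example_reset(dev);
		netif_device_attach(dev);
	}
	return 0;
}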
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 1cc94b2d76c11..56233afcb2b3d 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -86,8 +86,8 @@ static char *version =
/*====================================================================*/
-static void axnet_config(dev_link_t *link);
-static void axnet_release(dev_link_t *link);
+static int axnet_config(struct pcmcia_device *link);
+static void axnet_release(struct pcmcia_device *link);
static int axnet_open(struct net_device *dev);
static int axnet_close(struct net_device *dev);
static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -117,7 +117,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id, struct pt_regs *regs);
/*====================================================================*/
typedef struct axnet_dev_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
caddr_t base;
struct timer_list watchdog;
@@ -142,10 +142,9 @@ static inline axnet_dev_t *PRIV(struct net_device *dev)
======================================================================*/
-static int axnet_attach(struct pcmcia_device *p_dev)
+static int axnet_probe(struct pcmcia_device *link)
{
axnet_dev_t *info;
- dev_link_t *link;
struct net_device *dev;
DEBUG(0, "axnet_attach()\n");
@@ -157,7 +156,7 @@ static int axnet_attach(struct pcmcia_device *p_dev)
return -ENOMEM;
info = PRIV(dev);
- link = &info->link;
+ info->p_dev = link;
link->priv = dev;
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
@@ -169,13 +168,7 @@ static int axnet_attach(struct pcmcia_device *p_dev)
dev->do_ioctl = &axnet_ioctl;
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- axnet_config(link);
-
- return 0;
+ return axnet_config(link);
} /* axnet_attach */
/*======================================================================
@@ -187,18 +180,16 @@ static int axnet_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void axnet_detach(struct pcmcia_device *p_dev)
+static void axnet_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
DEBUG(0, "axnet_detach(0x%p)\n", link);
- if (link->dev)
+ if (link->dev_node)
unregister_netdev(dev);
- if (link->state & DEV_CONFIG)
- axnet_release(link);
+ axnet_release(link);
free_netdev(dev);
} /* axnet_detach */
@@ -209,7 +200,7 @@ static void axnet_detach(struct pcmcia_device *p_dev)
======================================================================*/
-static int get_prom(dev_link_t *link)
+static int get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
kio_addr_t ioaddr = dev->base_addr;
@@ -263,7 +254,7 @@ static int get_prom(dev_link_t *link)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static int try_io_port(dev_link_t *link)
+static int try_io_port(struct pcmcia_device *link)
{
int j, ret;
if (link->io.NumPorts1 == 32) {
@@ -284,25 +275,23 @@ static int try_io_port(dev_link_t *link)
for (j = 0; j < 0x400; j += 0x20) {
link->io.BasePort1 = j ^ 0x300;
link->io.BasePort2 = (j ^ 0x300) + 0x10;
- ret = pcmcia_request_io(link->handle, &link->io);
+ ret = pcmcia_request_io(link, &link->io);
if (ret == CS_SUCCESS) return ret;
}
return ret;
} else {
- return pcmcia_request_io(link->handle, &link->io);
+ return pcmcia_request_io(link, &link->io);
}
}
-static void axnet_config(dev_link_t *link)
+static int axnet_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
struct net_device *dev = link->priv;
axnet_dev_t *info = PRIV(dev);
tuple_t tuple;
cisparse_t parse;
int i, j, last_ret, last_fn;
u_short buf[64];
- config_info_t conf;
DEBUG(0, "axnet_config(0x%p)\n", link);
@@ -311,29 +300,22 @@ static void axnet_config(dev_link_t *link)
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
/* don't trust the CIS on this; Linksys got it wrong */
link->conf.Present = 0x63;
- /* Configure card */
- link->state |= DEV_CONFIG;
-
- /* Look up current Vcc */
- CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
- link->conf.Vcc = conf.Vcc;
-
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
tuple.Attributes = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (last_ret == CS_SUCCESS) {
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
cistpl_io_t *io = &(parse.cftable_entry.io);
- if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
- pcmcia_parse_tuple(handle, &tuple, &parse) != 0 ||
+ if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
+ pcmcia_parse_tuple(link, &tuple, &parse) != 0 ||
cfg->index == 0 || cfg->io.nwin == 0)
goto next_entry;
@@ -355,21 +337,21 @@ static void axnet_config(dev_link_t *link)
if (last_ret == CS_SUCCESS) break;
}
next_entry:
- last_ret = pcmcia_get_next_tuple(handle, &tuple);
+ last_ret = pcmcia_get_next_tuple(link, &tuple);
}
if (last_ret != CS_SUCCESS) {
- cs_error(handle, RequestIO, last_ret);
+ cs_error(link, RequestIO, last_ret);
goto failed;
}
- CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
if (link->io.NumPorts2 == 8) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
}
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
@@ -406,7 +388,7 @@ static void axnet_config(dev_link_t *link)
Bit 2 of CCSR is active low. */
if (i == 32) {
conf_reg_t reg = { 0, CS_WRITE, CISREG_CCSR, 0x04 };
- pcmcia_access_configuration_register(link->handle, &reg);
+ pcmcia_access_configuration_register(link, &reg);
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
if ((j != 0) && (j != 0xffff)) break;
@@ -414,13 +396,12 @@ static void axnet_config(dev_link_t *link)
}
info->phy_id = (i < 32) ? i : -1;
- link->dev = &info->node;
- link->state &= ~DEV_CONFIG_PENDING;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ link->dev_node = &info->node;
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
- link->dev = NULL;
+ link->dev_node = NULL;
goto failed;
}
@@ -436,14 +417,13 @@ static void axnet_config(dev_link_t *link)
} else {
printk(KERN_NOTICE " No MII transceivers found!\n");
}
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
axnet_release(link);
- link->state &= ~DEV_CONFIG_PENDING;
- return;
+ return -ENODEV;
} /* axnet_config */
/*======================================================================
@@ -454,45 +434,29 @@ failed:
======================================================================*/
-static void axnet_release(dev_link_t *link)
+static void axnet_release(struct pcmcia_device *link)
{
- DEBUG(0, "axnet_release(0x%p)\n", link);
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
}
-static int axnet_suspend(struct pcmcia_device *p_dev)
+static int axnet_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if (link->open)
- netif_device_detach(dev);
- pcmcia_release_configuration(link->handle);
- }
+ if (link->open)
+ netif_device_detach(dev);
return 0;
}
-static int axnet_resume(struct pcmcia_device *p_dev)
+static int axnet_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- if (link->open) {
- axnet_reset_8390(dev);
- AX88190_init(dev, 1);
- netif_device_attach(dev);
- }
+ if (link->open) {
+ axnet_reset_8390(dev);
+ AX88190_init(dev, 1);
+ netif_device_attach(dev);
}
return 0;
@@ -562,11 +526,11 @@ static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
static int axnet_open(struct net_device *dev)
{
axnet_dev_t *info = PRIV(dev);
- dev_link_t *link = &info->link;
+ struct pcmcia_device *link = info->p_dev;
DEBUG(2, "axnet_open('%s')\n", dev->name);
- if (!DEV_OK(link))
+ if (!pcmcia_dev_present(link))
return -ENODEV;
link->open++;
@@ -588,7 +552,7 @@ static int axnet_open(struct net_device *dev)
static int axnet_close(struct net_device *dev)
{
axnet_dev_t *info = PRIV(dev);
- dev_link_t *link = &info->link;
+ struct pcmcia_device *link = info->p_dev;
DEBUG(2, "axnet_close('%s')\n", dev->name);
@@ -833,7 +797,7 @@ static struct pcmcia_driver axnet_cs_driver = {
.drv = {
.name = "axnet_cs",
},
- .probe = axnet_attach,
+ .probe = axnet_probe,
.remove = axnet_detach,
.id_table = axnet_ids,
.suspend = axnet_suspend,
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 2827a48ea37c6..441de824ab6ba 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -118,8 +118,8 @@ MODULE_LICENSE("GPL");
/*====================================================================*/
-static void com20020_config(dev_link_t *link);
-static void com20020_release(dev_link_t *link);
+static int com20020_config(struct pcmcia_device *link);
+static void com20020_release(struct pcmcia_device *link);
static void com20020_detach(struct pcmcia_device *p_dev);
@@ -138,9 +138,8 @@ typedef struct com20020_dev_t {
======================================================================*/
-static int com20020_attach(struct pcmcia_device *p_dev)
+static int com20020_probe(struct pcmcia_device *p_dev)
{
- dev_link_t *link;
com20020_dev_t *info;
struct net_device *dev;
struct arcnet_local *lp;
@@ -148,10 +147,6 @@ static int com20020_attach(struct pcmcia_device *p_dev)
DEBUG(0, "com20020_attach()\n");
/* Create new network device */
- link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
- if (!link)
- return -ENOMEM;
-
info = kmalloc(sizeof(struct com20020_dev_t), GFP_KERNEL);
if (!info)
goto fail_alloc_info;
@@ -161,7 +156,6 @@ static int com20020_attach(struct pcmcia_device *p_dev)
goto fail_alloc_dev;
memset(info, 0, sizeof(struct com20020_dev_t));
- memset(link, 0, sizeof(struct dev_link_t));
lp = dev->priv;
lp->timeout = timeout;
lp->backplane = backplane;
@@ -172,28 +166,23 @@ static int com20020_attach(struct pcmcia_device *p_dev)
/* fill in our module parameters as defaults */
dev->dev_addr[0] = node;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts1 = 16;
- link->io.IOAddrLines = 16;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.IRQInfo1 = IRQ_LEVEL_ID;
- link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
- link->conf.IntType = INT_MEMORY_AND_IO;
- link->conf.Present = PRESENT_OPTION;
-
- link->irq.Instance = info->dev = dev;
- link->priv = info;
+ p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ p_dev->io.NumPorts1 = 16;
+ p_dev->io.IOAddrLines = 16;
+ p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ p_dev->conf.Attributes = CONF_ENABLE_IRQ;
+ p_dev->conf.IntType = INT_MEMORY_AND_IO;
+ p_dev->conf.Present = PRESENT_OPTION;
- link->state |= DEV_PRESENT;
- com20020_config(link);
+ p_dev->irq.Instance = info->dev = dev;
+ p_dev->priv = info;
- return 0;
+ return com20020_config(p_dev);
fail_alloc_dev:
kfree(info);
fail_alloc_info:
- kfree(link);
return -ENOMEM;
} /* com20020_attach */
@@ -206,9 +195,8 @@ fail_alloc_info:
======================================================================*/
-static void com20020_detach(struct pcmcia_device *p_dev)
+static void com20020_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct com20020_dev_t *info = link->priv;
struct net_device *dev = info->dev;
@@ -216,7 +204,7 @@ static void com20020_detach(struct pcmcia_device *p_dev)
DEBUG(0, "com20020_detach(0x%p)\n", link);
- if (link->dev) {
+ if (link->dev_node) {
DEBUG(1,"unregister...\n");
unregister_netdev(dev);
@@ -229,8 +217,7 @@ static void com20020_detach(struct pcmcia_device *p_dev)
free_irq(dev->irq, dev);
}
- if (link->state & DEV_CONFIG)
- com20020_release(link);
+ com20020_release(link);
/* Unlink device structure, free bits */
DEBUG(1,"unlinking...\n");
@@ -245,8 +232,6 @@ static void com20020_detach(struct pcmcia_device *p_dev)
DEBUG(1,"kfree2...\n");
kfree(info);
}
- DEBUG(1,"kfree3...\n");
- kfree(link);
} /* com20020_detach */
@@ -261,10 +246,9 @@ static void com20020_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void com20020_config(dev_link_t *link)
+static int com20020_config(struct pcmcia_device *link)
{
struct arcnet_local *lp;
- client_handle_t handle;
tuple_t tuple;
cisparse_t parse;
com20020_dev_t *info;
@@ -273,7 +257,6 @@ static void com20020_config(dev_link_t *link)
u_char buf[64];
int ioaddr;
- handle = link->handle;
info = link->priv;
dev = info->dev;
@@ -286,14 +269,11 @@ static void com20020_config(dev_link_t *link)
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
- /* Configure card */
- link->state |= DEV_CONFIG;
-
DEBUG(1,"arcnet: baseport1 is %Xh\n", link->io.BasePort1);
i = !CS_SUCCESS;
if (!link->io.BasePort1)
@@ -301,13 +281,13 @@ static void com20020_config(dev_link_t *link)
for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x10)
{
link->io.BasePort1 = ioaddr;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS)
break;
}
}
else
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i != CS_SUCCESS)
{
@@ -321,7 +301,7 @@ static void com20020_config(dev_link_t *link)
DEBUG(1,"arcnet: request IRQ %d (%Xh/%Xh)\n",
link->irq.AssignedIRQ,
link->irq.IRQInfo1, link->irq.IRQInfo2);
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
if (i != CS_SUCCESS)
{
DEBUG(1,"arcnet: requestIRQ failed totally!\n");
@@ -330,7 +310,7 @@ static void com20020_config(dev_link_t *link)
dev->irq = link->irq.AssignedIRQ;
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
if (com20020_check(dev))
{
@@ -342,15 +322,14 @@ static void com20020_config(dev_link_t *link)
lp->card_name = "PCMCIA COM20020";
lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */
- link->dev = &info->node;
- link->state &= ~DEV_CONFIG_PENDING;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ link->dev_node = &info->node;
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
i = com20020_found(dev, 0); /* calls register_netdev */
if (i != 0) {
DEBUG(1,KERN_NOTICE "com20020_cs: com20020_found() failed\n");
- link->dev = NULL;
+ link->dev_node = NULL;
goto failed;
}
@@ -358,13 +337,14 @@ static void com20020_config(dev_link_t *link)
DEBUG(1,KERN_INFO "%s: port %#3lx, irq %d\n",
dev->name, dev->base_addr, dev->irq);
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
DEBUG(1,"com20020_config failed...\n");
com20020_release(link);
+ return -ENODEV;
} /* com20020_config */
/*======================================================================
@@ -375,52 +355,33 @@ failed:
======================================================================*/
-static void com20020_release(dev_link_t *link)
+static void com20020_release(struct pcmcia_device *link)
{
-
- DEBUG(1,"release...\n");
-
- DEBUG(0, "com20020_release(0x%p)\n", link);
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~(DEV_CONFIG | DEV_RELEASE_PENDING);
+ DEBUG(0, "com20020_release(0x%p)\n", link);
+ pcmcia_disable_device(link);
}
-static int com20020_suspend(struct pcmcia_device *p_dev)
+static int com20020_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
com20020_dev_t *info = link->priv;
struct net_device *dev = info->dev;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if (link->open) {
- netif_device_detach(dev);
- }
- pcmcia_release_configuration(link->handle);
- }
+ if (link->open)
+ netif_device_detach(dev);
return 0;
}
-static int com20020_resume(struct pcmcia_device *p_dev)
+static int com20020_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
com20020_dev_t *info = link->priv;
struct net_device *dev = info->dev;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- if (link->open) {
- int ioaddr = dev->base_addr;
- struct arcnet_local *lp = dev->priv;
- ARCRESET;
- }
- }
+ if (link->open) {
+ int ioaddr = dev->base_addr;
+ struct arcnet_local *lp = dev->priv;
+ ARCRESET;
+ }
return 0;
}
@@ -436,7 +397,7 @@ static struct pcmcia_driver com20020_cs_driver = {
.drv = {
.name = "com20020_cs",
},
- .probe = com20020_attach,
+ .probe = com20020_probe,
.remove = com20020_detach,
.id_table = com20020_ids,
.suspend = com20020_suspend,
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index b7ac14ba8877d..09b11761cdfab 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -84,10 +84,10 @@ static char *version = DRV_NAME ".c " DRV_VERSION " 2002/03/23";
/*
PCMCIA event handlers
*/
-static void fmvj18x_config(dev_link_t *link);
-static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id);
-static int fmvj18x_setup_mfc(dev_link_t *link);
-static void fmvj18x_release(dev_link_t *link);
+static int fmvj18x_config(struct pcmcia_device *link);
+static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id);
+static int fmvj18x_setup_mfc(struct pcmcia_device *link);
+static void fmvj18x_release(struct pcmcia_device *link);
static void fmvj18x_detach(struct pcmcia_device *p_dev);
/*
@@ -116,7 +116,7 @@ typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN,
driver specific data structure
*/
typedef struct local_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct net_device_stats stats;
long open_time;
@@ -228,10 +228,9 @@ typedef struct local_info_t {
#define BANK_1U 0x24 /* bank 1 (CONFIG_1) */
#define BANK_2U 0x28 /* bank 2 (CONFIG_1) */
-static int fmvj18x_attach(struct pcmcia_device *p_dev)
+static int fmvj18x_probe(struct pcmcia_device *link)
{
local_info_t *lp;
- dev_link_t *link;
struct net_device *dev;
DEBUG(0, "fmvj18x_attach()\n");
@@ -241,8 +240,8 @@ static int fmvj18x_attach(struct pcmcia_device *p_dev)
if (!dev)
return -ENOMEM;
lp = netdev_priv(dev);
- link = &lp->link;
link->priv = dev;
+ lp->p_dev = link;
/* The io structure describes IO port mapping */
link->io.NumPorts1 = 32;
@@ -257,7 +256,6 @@ static int fmvj18x_attach(struct pcmcia_device *p_dev)
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
/* The FMVJ18x specific entries in the device structure. */
@@ -274,29 +272,21 @@ static int fmvj18x_attach(struct pcmcia_device *p_dev)
#endif
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- fmvj18x_config(link);
-
- return 0;
+ return fmvj18x_config(link);
} /* fmvj18x_attach */
/*====================================================================*/
-static void fmvj18x_detach(struct pcmcia_device *p_dev)
+static void fmvj18x_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
DEBUG(0, "fmvj18x_detach(0x%p)\n", link);
- if (link->dev)
+ if (link->dev_node)
unregister_netdev(dev);
- if (link->state & DEV_CONFIG)
- fmvj18x_release(link);
+ fmvj18x_release(link);
free_netdev(dev);
} /* fmvj18x_detach */
@@ -306,7 +296,7 @@ static void fmvj18x_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static int mfc_try_io_port(dev_link_t *link)
+static int mfc_try_io_port(struct pcmcia_device *link)
{
int i, ret;
static const kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
@@ -318,13 +308,13 @@ static int mfc_try_io_port(dev_link_t *link)
link->io.NumPorts2 = 0;
printk(KERN_NOTICE "fmvj18x_cs: out of resource for serial\n");
}
- ret = pcmcia_request_io(link->handle, &link->io);
+ ret = pcmcia_request_io(link, &link->io);
if (ret == CS_SUCCESS) return ret;
}
return ret;
}
-static int ungermann_try_io_port(dev_link_t *link)
+static int ungermann_try_io_port(struct pcmcia_device *link)
{
int ret;
kio_addr_t ioaddr;
@@ -334,7 +324,7 @@ static int ungermann_try_io_port(dev_link_t *link)
*/
for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) {
link->io.BasePort1 = ioaddr;
- ret = pcmcia_request_io(link->handle, &link->io);
+ ret = pcmcia_request_io(link, &link->io);
if (ret == CS_SUCCESS) {
/* calculate ConfigIndex value */
link->conf.ConfigIndex =
@@ -345,9 +335,8 @@ static int ungermann_try_io_port(dev_link_t *link)
return ret; /* RequestIO failed */
}
-static void fmvj18x_config(dev_link_t *link)
+static int fmvj18x_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
struct net_device *dev = link->priv;
local_info_t *lp = netdev_priv(dev);
tuple_t tuple;
@@ -366,42 +355,34 @@ static void fmvj18x_config(dev_link_t *link)
registers.
*/
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
tuple.TupleData = (u_char *)buf;
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
-
- /* Configure card */
- link->state |= DEV_CONFIG;
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
tuple.DesiredTuple = CISTPL_FUNCE;
tuple.TupleOffset = 0;
- if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) {
+ if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) {
/* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigIndex = parse.cftable_entry.index;
tuple.DesiredTuple = CISTPL_MANFID;
- if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS)
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS)
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
else
buf[0] = 0xffff;
switch (le16_to_cpu(buf[0])) {
case MANFID_TDK:
cardtype = TDK;
- if (le16_to_cpu(buf[1]) == PRODID_TDK_CF010) {
- cs_status_t status;
- pcmcia_get_status(handle, &status);
- if (status.CardState & CS_EVENT_3VCARD)
- link->conf.Vcc = 33; /* inserted in 3.3V slot */
- } else if (le16_to_cpu(buf[1]) == PRODID_TDK_GN3410
+ if (le16_to_cpu(buf[1]) == PRODID_TDK_GN3410
|| le16_to_cpu(buf[1]) == PRODID_TDK_NP9610
|| le16_to_cpu(buf[1]) == PRODID_TDK_MN3200) {
/* MultiFunction Card */
@@ -429,8 +410,8 @@ static void fmvj18x_config(dev_link_t *link)
} else {
/* old type card */
tuple.DesiredTuple = CISTPL_MANFID;
- if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS)
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS)
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
else
buf[0] = 0xffff;
switch (le16_to_cpu(buf[0])) {
@@ -461,10 +442,10 @@ static void fmvj18x_config(dev_link_t *link)
ret = ungermann_try_io_port(link);
if (ret != CS_SUCCESS) goto cs_failed;
} else {
- CS_CHECK(RequestIO, pcmcia_request_io(link->handle, &link->io));
+ CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
}
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
@@ -493,17 +474,17 @@ static void fmvj18x_config(dev_link_t *link)
case CONTEC:
tuple.DesiredTuple = CISTPL_FUNCE;
tuple.TupleOffset = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
tuple.TupleOffset = 0;
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
if (cardtype == MBH10304) {
/* MBH10304's CIS_FUNCE is corrupted */
node_id = &(tuple.TupleData[5]);
card_name = "FMV-J182";
} else {
while (tuple.TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID ) {
- CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
}
node_id = &(tuple.TupleData[2]);
if( cardtype == TDK ) {
@@ -545,13 +526,12 @@ static void fmvj18x_config(dev_link_t *link)
}
lp->cardtype = cardtype;
- link->dev = &lp->node;
- link->state &= ~DEV_CONFIG_PENDING;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ link->dev_node = &lp->node;
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
- link->dev = NULL;
+ link->dev_node = NULL;
goto failed;
}
@@ -564,19 +544,18 @@ static void fmvj18x_config(dev_link_t *link)
for (i = 0; i < 6; i++)
printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
- return;
+ return 0;
cs_failed:
/* All Card Services errors end up here */
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
fmvj18x_release(link);
- link->state &= ~DEV_CONFIG_PENDING;
-
+ return -ENODEV;
} /* fmvj18x_config */
/*====================================================================*/
-static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id)
+static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
{
win_req_t req;
memreq_t mem;
@@ -587,9 +566,9 @@ static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id)
req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
req.Base = 0; req.Size = 0;
req.AccessSpeed = 0;
- i = pcmcia_request_window(&link->handle, &req, &link->win);
+ i = pcmcia_request_window(&link, &req, &link->win);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestWindow, i);
+ cs_error(link, RequestWindow, i);
return -1;
}
@@ -623,13 +602,13 @@ static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id)
iounmap(base);
j = pcmcia_release_window(link->win);
if (j != CS_SUCCESS)
- cs_error(link->handle, ReleaseWindow, j);
+ cs_error(link, ReleaseWindow, j);
return (i != 0x200) ? 0 : -1;
} /* fmvj18x_get_hwinfo */
/*====================================================================*/
-static int fmvj18x_setup_mfc(dev_link_t *link)
+static int fmvj18x_setup_mfc(struct pcmcia_device *link)
{
win_req_t req;
memreq_t mem;
@@ -642,9 +621,9 @@ static int fmvj18x_setup_mfc(dev_link_t *link)
req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
req.Base = 0; req.Size = 0;
req.AccessSpeed = 0;
- i = pcmcia_request_window(&link->handle, &req, &link->win);
+ i = pcmcia_request_window(&link, &req, &link->win);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestWindow, i);
+ cs_error(link, RequestWindow, i);
return -1;
}
@@ -666,54 +645,35 @@ static int fmvj18x_setup_mfc(dev_link_t *link)
iounmap(base);
j = pcmcia_release_window(link->win);
if (j != CS_SUCCESS)
- cs_error(link->handle, ReleaseWindow, j);
+ cs_error(link, ReleaseWindow, j);
return 0;
}
/*====================================================================*/
-static void fmvj18x_release(dev_link_t *link)
+static void fmvj18x_release(struct pcmcia_device *link)
{
-
- DEBUG(0, "fmvj18x_release(0x%p)\n", link);
-
- /* Don't bother checking to see if these succeed or not */
- pcmcia_release_window(link->win);
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
+ DEBUG(0, "fmvj18x_release(0x%p)\n", link);
+ pcmcia_disable_device(link);
}
-static int fmvj18x_suspend(struct pcmcia_device *p_dev)
+static int fmvj18x_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if (link->open)
- netif_device_detach(dev);
- pcmcia_release_configuration(link->handle);
- }
-
+ if (link->open)
+ netif_device_detach(dev);
return 0;
}
-static int fmvj18x_resume(struct pcmcia_device *p_dev)
+static int fmvj18x_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- if (link->open) {
- fjn_reset(dev);
- netif_device_attach(dev);
- }
+ if (link->open) {
+ fjn_reset(dev);
+ netif_device_attach(dev);
}
return 0;
@@ -751,7 +711,7 @@ static struct pcmcia_driver fmvj18x_cs_driver = {
.drv = {
.name = "fmvj18x_cs",
},
- .probe = fmvj18x_attach,
+ .probe = fmvj18x_probe,
.remove = fmvj18x_detach,
.id_table = fmvj18x_ids,
.suspend = fmvj18x_suspend,
@@ -1148,11 +1108,11 @@ static int fjn_config(struct net_device *dev, struct ifmap *map){
static int fjn_open(struct net_device *dev)
{
struct local_info_t *lp = netdev_priv(dev);
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
DEBUG(4, "fjn_open('%s').\n", dev->name);
- if (!DEV_OK(link))
+ if (!pcmcia_dev_present(link))
return -ENODEV;
link->open++;
@@ -1173,7 +1133,7 @@ static int fjn_open(struct net_device *dev)
static int fjn_close(struct net_device *dev)
{
struct local_info_t *lp = netdev_priv(dev);
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
kio_addr_t ioaddr = dev->base_addr;
DEBUG(4, "fjn_close('%s').\n", dev->name);
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index b9c7e39576f57..b8fe70b85641a 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -105,15 +105,15 @@ MODULE_LICENSE("GPL");
/*====================================================================*/
-static void ibmtr_config(dev_link_t *link);
+static int ibmtr_config(struct pcmcia_device *link);
static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase);
-static void ibmtr_release(dev_link_t *link);
+static void ibmtr_release(struct pcmcia_device *link);
static void ibmtr_detach(struct pcmcia_device *p_dev);
/*====================================================================*/
typedef struct ibmtr_dev_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
struct net_device *dev;
dev_node_t node;
window_handle_t sram_win_handle;
@@ -138,12 +138,11 @@ static struct ethtool_ops netdev_ethtool_ops = {
======================================================================*/
-static int ibmtr_attach(struct pcmcia_device *p_dev)
+static int ibmtr_attach(struct pcmcia_device *link)
{
ibmtr_dev_t *info;
- dev_link_t *link;
struct net_device *dev;
-
+
DEBUG(0, "ibmtr_attach()\n");
/* Create new token-ring device */
@@ -156,7 +155,7 @@ static int ibmtr_attach(struct pcmcia_device *p_dev)
return -ENOMEM;
}
- link = &info->link;
+ info->p_dev = link;
link->priv = info;
info->ti = netdev_priv(dev);
@@ -167,21 +166,14 @@ static int ibmtr_attach(struct pcmcia_device *p_dev)
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->irq.Handler = &tok_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
link->irq.Instance = info->dev = dev;
-
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
-
- link->handle = p_dev;
- p_dev->instance = link;
- link->state |= DEV_PRESENT;
- ibmtr_config(link);
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
- return 0;
+ return ibmtr_config(link);
} /* ibmtr_attach */
/*======================================================================
@@ -193,23 +185,22 @@ static int ibmtr_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void ibmtr_detach(struct pcmcia_device *p_dev)
+static void ibmtr_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct ibmtr_dev_t *info = link->priv;
struct net_device *dev = info->dev;
DEBUG(0, "ibmtr_detach(0x%p)\n", link);
- if (link->dev)
+ if (link->dev_node)
unregister_netdev(dev);
{
struct tok_info *ti = netdev_priv(dev);
del_timer_sync(&(ti->tr_timer));
}
- if (link->state & DEV_CONFIG)
- ibmtr_release(link);
+
+ ibmtr_release(link);
free_netdev(dev);
kfree(info);
@@ -226,9 +217,8 @@ static void ibmtr_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void ibmtr_config(dev_link_t *link)
+static int ibmtr_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
ibmtr_dev_t *info = link->priv;
struct net_device *dev = info->dev;
struct tok_info *ti = netdev_priv(dev);
@@ -246,29 +236,25 @@ static void ibmtr_config(dev_link_t *link)
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
-
- /* Configure card */
- link->state |= DEV_CONFIG;
-
link->conf.ConfigIndex = 0x61;
/* Determine if this is PRIMARY or ALTERNATE. */
/* Try PRIMARY card at 0xA20-0xA23 */
link->io.BasePort1 = 0xA20;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i != CS_SUCCESS) {
/* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */
link->io.BasePort1 = 0xA24;
- CS_CHECK(RequestIO, pcmcia_request_io(link->handle, &link->io));
+ CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
}
dev->base_addr = link->io.BasePort1;
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
dev->irq = link->irq.AssignedIRQ;
ti->irq = link->irq.AssignedIRQ;
ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);
@@ -279,7 +265,7 @@ static void ibmtr_config(dev_link_t *link)
req.Base = 0;
req.Size = 0x2000;
req.AccessSpeed = 250;
- CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &link->win));
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win));
mem.CardOffset = mmiobase;
mem.Page = 0;
@@ -292,7 +278,7 @@ static void ibmtr_config(dev_link_t *link)
req.Base = 0;
req.Size = sramsize * 1024;
req.AccessSpeed = 250;
- CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &info->sram_win_handle));
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &info->sram_win_handle));
mem.CardOffset = srambase;
mem.Page = 0;
@@ -302,21 +288,20 @@ static void ibmtr_config(dev_link_t *link)
ti->sram_virt = ioremap(req.Base, req.Size);
ti->sram_phys = req.Base;
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
/* Set up the Token-Ring Controller Configuration Register and
turn on the card. Check the "Local Area Network Credit Card
Adapters Technical Reference" SC30-3585 for this info. */
ibmtr_hw_setup(dev, mmiobase);
- link->dev = &info->node;
- link->state &= ~DEV_CONFIG_PENDING;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ link->dev_node = &info->node;
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
i = ibmtr_probe_card(dev);
if (i != 0) {
printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n");
- link->dev = NULL;
+ link->dev_node = NULL;
goto failed;
}
@@ -330,12 +315,13 @@ static void ibmtr_config(dev_link_t *link)
for (i = 0; i < TR_ALEN; i++)
printk("%02X", dev->dev_addr[i]);
printk("\n");
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
ibmtr_release(link);
+ return -ENODEV;
} /* ibmtr_config */
/*======================================================================
@@ -346,56 +332,41 @@ failed:
======================================================================*/
-static void ibmtr_release(dev_link_t *link)
+static void ibmtr_release(struct pcmcia_device *link)
{
- ibmtr_dev_t *info = link->priv;
- struct net_device *dev = info->dev;
-
- DEBUG(0, "ibmtr_release(0x%p)\n", link);
+ ibmtr_dev_t *info = link->priv;
+ struct net_device *dev = info->dev;
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
- if (link->win) {
- struct tok_info *ti = netdev_priv(dev);
- iounmap(ti->mmio);
- pcmcia_release_window(link->win);
- pcmcia_release_window(info->sram_win_handle);
- }
+ DEBUG(0, "ibmtr_release(0x%p)\n", link);
- link->state &= ~DEV_CONFIG;
+ if (link->win) {
+ struct tok_info *ti = netdev_priv(dev);
+ iounmap(ti->mmio);
+ pcmcia_release_window(info->sram_win_handle);
+ }
+ pcmcia_disable_device(link);
}
-static int ibmtr_suspend(struct pcmcia_device *p_dev)
+static int ibmtr_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
ibmtr_dev_t *info = link->priv;
struct net_device *dev = info->dev;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if (link->open)
- netif_device_detach(dev);
- pcmcia_release_configuration(link->handle);
- }
+ if (link->open)
+ netif_device_detach(dev);
return 0;
}
-static int ibmtr_resume(struct pcmcia_device *p_dev)
+static int ibmtr_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
ibmtr_dev_t *info = link->priv;
struct net_device *dev = info->dev;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- if (link->open) {
- ibmtr_probe(dev); /* really? */
- netif_device_attach(dev);
- }
- }
+ if (link->open) {
+ ibmtr_probe(dev); /* really? */
+ netif_device_attach(dev);
+ }
return 0;
}
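The release path changes the same way in each driver: only truly driver-private teardown (unmapping memory windows, freeing extra window handles) stays in the function, and a single pcmcia_disable_device() call replaces the old pcmcia_release_configuration()/_io()/_irq()/_window() sequence plus the manual DEV_CONFIG bookkeeping. A sketch under the same headers as the previous example, with a hypothetical private structure standing in for ibmtr_dev_t, pcnet_dev_t and friends:

struct example_private {		/* hypothetical stand-in for the per-driver privates */
	struct pcmcia_device *p_dev;
	dev_node_t node;
	void __iomem *base;		/* driver's own ioremap() of a requested window */
};

static void example_release(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;
	struct example_private *priv = netdev_priv(dev);

	if (link->win)			/* a memory window was set up during config */
		iounmap(priv->base);	/* undo only what the driver mapped itself */

	/* configuration, I/O, IRQ and windows are all torn down here */
	pcmcia_disable_device(link);
}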
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 787176c57fd9e..4260c2128f472 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -362,7 +362,7 @@ typedef struct _mace_statistics {
} mace_statistics;
typedef struct _mace_private {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct net_device_stats linux_stats; /* Linux statistics counters */
mace_statistics mace_stats; /* MACE chip statistics counters */
@@ -417,8 +417,8 @@ INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
Function Prototypes
---------------------------------------------------------------------------- */
-static void nmclan_config(dev_link_t *link);
-static void nmclan_release(dev_link_t *link);
+static int nmclan_config(struct pcmcia_device *link);
+static void nmclan_release(struct pcmcia_device *link);
static void nmclan_reset(struct net_device *dev);
static int mace_config(struct net_device *dev, struct ifmap *map);
@@ -443,10 +443,9 @@ nmclan_attach
Services.
---------------------------------------------------------------------------- */
-static int nmclan_attach(struct pcmcia_device *p_dev)
+static int nmclan_probe(struct pcmcia_device *link)
{
mace_private *lp;
- dev_link_t *link;
struct net_device *dev;
DEBUG(0, "nmclan_attach()\n");
@@ -457,7 +456,7 @@ static int nmclan_attach(struct pcmcia_device *p_dev)
if (!dev)
return -ENOMEM;
lp = netdev_priv(dev);
- link = &lp->link;
+ lp->p_dev = link;
link->priv = dev;
spin_lock_init(&lp->bank_lock);
@@ -469,7 +468,6 @@ static int nmclan_attach(struct pcmcia_device *p_dev)
link->irq.Handler = &mace_interrupt;
link->irq.Instance = dev;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
link->conf.Present = PRESENT_OPTION;
@@ -489,13 +487,7 @@ static int nmclan_attach(struct pcmcia_device *p_dev)
dev->watchdog_timeo = TX_TIMEOUT;
#endif
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- nmclan_config(link);
-
- return 0;
+ return nmclan_config(link);
} /* nmclan_attach */
/* ----------------------------------------------------------------------------
@@ -506,18 +498,16 @@ nmclan_detach
when the device is released.
---------------------------------------------------------------------------- */
-static void nmclan_detach(struct pcmcia_device *p_dev)
+static void nmclan_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
DEBUG(0, "nmclan_detach(0x%p)\n", link);
- if (link->dev)
+ if (link->dev_node)
unregister_netdev(dev);
- if (link->state & DEV_CONFIG)
- nmclan_release(link);
+ nmclan_release(link);
free_netdev(dev);
} /* nmclan_detach */
@@ -661,9 +651,8 @@ nmclan_config
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void nmclan_config(dev_link_t *link)
+static int nmclan_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
struct net_device *dev = link->priv;
mace_private *lp = netdev_priv(dev);
tuple_t tuple;
@@ -679,17 +668,14 @@ static void nmclan_config(dev_link_t *link)
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
- /* Configure card */
- link->state |= DEV_CONFIG;
-
- CS_CHECK(RequestIO, pcmcia_request_io(handle, &link->io));
- CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
@@ -700,8 +686,8 @@ static void nmclan_config(dev_link_t *link)
tuple.TupleData = buf;
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN);
/* Verify configuration by reading the MACE ID. */
@@ -716,8 +702,7 @@ static void nmclan_config(dev_link_t *link)
} else {
printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should"
" be 0x40 0x?9\n", sig[0], sig[1]);
- link->state &= ~DEV_CONFIG_PENDING;
- return;
+ return -ENODEV;
}
}
@@ -730,14 +715,13 @@ static void nmclan_config(dev_link_t *link)
else
printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
- link->dev = &lp->node;
- link->state &= ~DEV_CONFIG_PENDING;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ link->dev_node = &lp->node;
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
i = register_netdev(dev);
if (i != 0) {
printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
- link->dev = NULL;
+ link->dev_node = NULL;
goto failed;
}
@@ -747,14 +731,13 @@ static void nmclan_config(dev_link_t *link)
dev->name, dev->base_addr, dev->irq, if_names[dev->if_port]);
for (i = 0; i < 6; i++)
printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
- nmclan_release(link);
- return;
-
+ nmclan_release(link);
+ return -ENODEV;
} /* nmclan_config */
/* ----------------------------------------------------------------------------
@@ -763,46 +746,29 @@ nmclan_release
net device, and release the PCMCIA configuration. If the device
is still open, this will be postponed until it is closed.
---------------------------------------------------------------------------- */
-static void nmclan_release(dev_link_t *link)
+static void nmclan_release(struct pcmcia_device *link)
{
-
- DEBUG(0, "nmclan_release(0x%p)\n", link);
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
+ DEBUG(0, "nmclan_release(0x%p)\n", link);
+ pcmcia_disable_device(link);
}
-static int nmclan_suspend(struct pcmcia_device *p_dev)
+static int nmclan_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if (link->open)
- netif_device_detach(dev);
- pcmcia_release_configuration(link->handle);
- }
-
+ if (link->open)
+ netif_device_detach(dev);
return 0;
}
-static int nmclan_resume(struct pcmcia_device *p_dev)
+static int nmclan_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- if (link->open) {
- nmclan_reset(dev);
- netif_device_attach(dev);
- }
+ if (link->open) {
+ nmclan_reset(dev);
+ netif_device_attach(dev);
}
return 0;
@@ -818,7 +784,7 @@ static void nmclan_reset(struct net_device *dev)
mace_private *lp = netdev_priv(dev);
#if RESET_XILINX
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = &lp->link;
conf_reg_t reg;
u_long OrigCorValue;
@@ -827,7 +793,7 @@ static void nmclan_reset(struct net_device *dev)
reg.Action = CS_READ;
reg.Offset = CISREG_COR;
reg.Value = 0;
- pcmcia_access_configuration_register(link->handle, &reg);
+ pcmcia_access_configuration_register(link, &reg);
OrigCorValue = reg.Value;
/* Reset Xilinx */
@@ -836,12 +802,12 @@ static void nmclan_reset(struct net_device *dev)
DEBUG(1, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n",
OrigCorValue);
reg.Value = COR_SOFT_RESET;
- pcmcia_access_configuration_register(link->handle, &reg);
+ pcmcia_access_configuration_register(link, &reg);
/* Need to wait for 20 ms for PCMCIA to finish reset. */
/* Restore original COR configuration index */
reg.Value = COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK);
- pcmcia_access_configuration_register(link->handle, &reg);
+ pcmcia_access_configuration_register(link, &reg);
/* Xilinx is now completely reset along with the MACE chip. */
lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
@@ -885,9 +851,9 @@ static int mace_open(struct net_device *dev)
{
kio_addr_t ioaddr = dev->base_addr;
mace_private *lp = netdev_priv(dev);
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
- if (!DEV_OK(link))
+ if (!pcmcia_dev_present(link))
return -ENODEV;
link->open++;
@@ -908,7 +874,7 @@ static int mace_close(struct net_device *dev)
{
kio_addr_t ioaddr = dev->base_addr;
mace_private *lp = netdev_priv(dev);
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
DEBUG(2, "%s: shutting down ethercard.\n", dev->name);
@@ -963,12 +929,12 @@ mace_start_xmit
static void mace_tx_timeout(struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
printk(KERN_NOTICE "%s: transmit timed out -- ", dev->name);
#if RESET_ON_TIMEOUT
printk("resetting card\n");
- pcmcia_reset_card(link->handle, NULL);
+ pcmcia_reset_card(link, NULL);
#else /* #if RESET_ON_TIMEOUT */
printk("NOT resetting card\n");
#endif /* #if RESET_ON_TIMEOUT */
@@ -1635,7 +1601,7 @@ static struct pcmcia_driver nmclan_cs_driver = {
.drv = {
.name = "nmclan_cs",
},
- .probe = nmclan_attach,
+ .probe = nmclan_probe,
.remove = nmclan_detach,
.id_table = nmclan_ids,
.suspend = nmclan_suspend,
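nmclan also illustrates the new probe contract: instead of stashing a handle, setting DEV_PRESENT | DEV_CONFIG_PENDING and returning 0 unconditionally, the probe routine wires up the back-pointer and hands back the return value of its config function. A condensed sketch reusing the struct from the previous example; example_config() is hypothetical, standing in for the per-driver *_config() routines above.

static int example_config(struct pcmcia_device *link);	/* per-driver, see above */

static int example_probe(struct pcmcia_device *link)
{
	struct net_device *dev;
	struct example_private *priv;

	dev = alloc_etherdev(sizeof(struct example_private));
	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->p_dev = link;		/* back-pointer replaces the embedded dev_link_t */
	link->priv = dev;

	link->conf.Attributes = CONF_ENABLE_IRQ;
	link->conf.IntType = INT_MEMORY_AND_IO;

	return example_config(link);	/* 0 on success, -ENODEV on failure */
}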
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index b46e5f703efab..506e777c5f06a 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -103,8 +103,8 @@ module_param_array(hw_addr, int, NULL, 0);
/*====================================================================*/
static void mii_phy_probe(struct net_device *dev);
-static void pcnet_config(dev_link_t *link);
-static void pcnet_release(dev_link_t *link);
+static int pcnet_config(struct pcmcia_device *link);
+static void pcnet_release(struct pcmcia_device *link);
static int pcnet_open(struct net_device *dev);
static int pcnet_close(struct net_device *dev);
static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -113,9 +113,9 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs);
static void ei_watchdog(u_long arg);
static void pcnet_reset_8390(struct net_device *dev);
static int set_config(struct net_device *dev, struct ifmap *map);
-static int setup_shmem_window(dev_link_t *link, int start_pg,
+static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
int stop_pg, int cm_offset);
-static int setup_dma_config(dev_link_t *link, int start_pg,
+static int setup_dma_config(struct pcmcia_device *link, int start_pg,
int stop_pg);
static void pcnet_detach(struct pcmcia_device *p_dev);
@@ -214,7 +214,7 @@ static hw_info_t dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII };
static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII };
typedef struct pcnet_dev_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
u_int flags;
void __iomem *base;
@@ -240,10 +240,9 @@ static inline pcnet_dev_t *PRIV(struct net_device *dev)
======================================================================*/
-static int pcnet_probe(struct pcmcia_device *p_dev)
+static int pcnet_probe(struct pcmcia_device *link)
{
pcnet_dev_t *info;
- dev_link_t *link;
struct net_device *dev;
DEBUG(0, "pcnet_attach()\n");
@@ -252,7 +251,7 @@ static int pcnet_probe(struct pcmcia_device *p_dev)
dev = __alloc_ei_netdev(sizeof(pcnet_dev_t));
if (!dev) return -ENOMEM;
info = PRIV(dev);
- link = &info->link;
+ info->p_dev = link;
link->priv = dev;
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
@@ -265,13 +264,7 @@ static int pcnet_probe(struct pcmcia_device *p_dev)
dev->stop = &pcnet_close;
dev->set_config = &set_config;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- pcnet_config(link);
-
- return 0;
+ return pcnet_config(link);
} /* pcnet_attach */
/*======================================================================
@@ -283,18 +276,16 @@ static int pcnet_probe(struct pcmcia_device *p_dev)
======================================================================*/
-static void pcnet_detach(struct pcmcia_device *p_dev)
+static void pcnet_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
DEBUG(0, "pcnet_detach(0x%p)\n", link);
- if (link->dev)
+ if (link->dev_node)
unregister_netdev(dev);
- if (link->state & DEV_CONFIG)
- pcnet_release(link);
+ pcnet_release(link);
free_netdev(dev);
} /* pcnet_detach */
@@ -306,7 +297,7 @@ static void pcnet_detach(struct pcmcia_device *p_dev)
======================================================================*/
-static hw_info_t *get_hwinfo(dev_link_t *link)
+static hw_info_t *get_hwinfo(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
win_req_t req;
@@ -318,9 +309,9 @@ static hw_info_t *get_hwinfo(dev_link_t *link)
req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
req.Base = 0; req.Size = 0;
req.AccessSpeed = 0;
- i = pcmcia_request_window(&link->handle, &req, &link->win);
+ i = pcmcia_request_window(&link, &req, &link->win);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestWindow, i);
+ cs_error(link, RequestWindow, i);
return NULL;
}
@@ -343,7 +334,7 @@ static hw_info_t *get_hwinfo(dev_link_t *link)
iounmap(virt);
j = pcmcia_release_window(link->win);
if (j != CS_SUCCESS)
- cs_error(link->handle, ReleaseWindow, j);
+ cs_error(link, ReleaseWindow, j);
return (i < NR_INFO) ? hw_info+i : NULL;
} /* get_hwinfo */
@@ -355,7 +346,7 @@ static hw_info_t *get_hwinfo(dev_link_t *link)
======================================================================*/
-static hw_info_t *get_prom(dev_link_t *link)
+static hw_info_t *get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
kio_addr_t ioaddr = dev->base_addr;
@@ -409,7 +400,7 @@ static hw_info_t *get_prom(dev_link_t *link)
======================================================================*/
-static hw_info_t *get_dl10019(dev_link_t *link)
+static hw_info_t *get_dl10019(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
int i;
@@ -431,7 +422,7 @@ static hw_info_t *get_dl10019(dev_link_t *link)
======================================================================*/
-static hw_info_t *get_ax88190(dev_link_t *link)
+static hw_info_t *get_ax88190(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
kio_addr_t ioaddr = dev->base_addr;
@@ -464,7 +455,7 @@ static hw_info_t *get_ax88190(dev_link_t *link)
======================================================================*/
-static hw_info_t *get_hwired(dev_link_t *link)
+static hw_info_t *get_hwired(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
int i;
@@ -491,7 +482,7 @@ static hw_info_t *get_hwired(dev_link_t *link)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static int try_io_port(dev_link_t *link)
+static int try_io_port(struct pcmcia_device *link)
{
int j, ret;
if (link->io.NumPorts1 == 32) {
@@ -512,18 +503,17 @@ static int try_io_port(dev_link_t *link)
for (j = 0; j < 0x400; j += 0x20) {
link->io.BasePort1 = j ^ 0x300;
link->io.BasePort2 = (j ^ 0x300) + 0x10;
- ret = pcmcia_request_io(link->handle, &link->io);
+ ret = pcmcia_request_io(link, &link->io);
if (ret == CS_SUCCESS) return ret;
}
return ret;
} else {
- return pcmcia_request_io(link->handle, &link->io);
+ return pcmcia_request_io(link, &link->io);
}
}
-static void pcnet_config(dev_link_t *link)
+static int pcnet_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
struct net_device *dev = link->priv;
pcnet_dev_t *info = PRIV(dev);
tuple_t tuple;
@@ -531,7 +521,6 @@ static void pcnet_config(dev_link_t *link)
int i, last_ret, last_fn, start_pg, stop_pg, cm_offset;
int manfid = 0, prodid = 0, has_shmem = 0;
u_short buf[64];
- config_info_t conf;
hw_info_t *hw_info;
DEBUG(0, "pcnet_config(0x%p)\n", link);
@@ -541,36 +530,29 @@ static void pcnet_config(dev_link_t *link)
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
-
- /* Look up current Vcc */
- CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
- link->conf.Vcc = conf.Vcc;
-
tuple.DesiredTuple = CISTPL_MANFID;
tuple.Attributes = TUPLE_RETURN_COMMON;
- if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) &&
- (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS)) {
+ if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
+ (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS)) {
manfid = le16_to_cpu(buf[0]);
prodid = le16_to_cpu(buf[1]);
}
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
tuple.Attributes = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (last_ret == CS_SUCCESS) {
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
cistpl_io_t *io = &(parse.cftable_entry.io);
- if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
- pcmcia_parse_tuple(handle, &tuple, &parse) != 0 ||
+ if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
+ pcmcia_parse_tuple(link, &tuple, &parse) != 0 ||
cfg->index == 0 || cfg->io.nwin == 0)
goto next_entry;
@@ -594,14 +576,14 @@ static void pcnet_config(dev_link_t *link)
if (last_ret == CS_SUCCESS) break;
}
next_entry:
- last_ret = pcmcia_get_next_tuple(handle, &tuple);
+ last_ret = pcmcia_get_next_tuple(link, &tuple);
}
if (last_ret != CS_SUCCESS) {
- cs_error(handle, RequestIO, last_ret);
+ cs_error(link, RequestIO, last_ret);
goto failed;
}
- CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
if (link->io.NumPorts2 == 8) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -611,7 +593,7 @@ static void pcnet_config(dev_link_t *link)
(prodid == PRODID_IBM_HOME_AND_AWAY))
link->conf.ConfigIndex |= 0x10;
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
if (info->flags & HAS_MISC_REG) {
@@ -679,9 +661,8 @@ static void pcnet_config(dev_link_t *link)
info->eth_phy = 0;
}
- link->dev = &info->node;
- link->state &= ~DEV_CONFIG_PENDING;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ link->dev_node = &info->node;
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = ei_poll;
@@ -689,7 +670,7 @@ static void pcnet_config(dev_link_t *link)
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
- link->dev = NULL;
+ link->dev_node = NULL;
goto failed;
}
@@ -712,14 +693,13 @@ static void pcnet_config(dev_link_t *link)
printk(" hw_addr ");
for (i = 0; i < 6; i++)
printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
pcnet_release(link);
- link->state &= ~DEV_CONFIG_PENDING;
- return;
+ return -ENODEV;
} /* pcnet_config */
/*======================================================================
@@ -730,21 +710,16 @@ failed:
======================================================================*/
-static void pcnet_release(dev_link_t *link)
+static void pcnet_release(struct pcmcia_device *link)
{
- pcnet_dev_t *info = PRIV(link->priv);
+ pcnet_dev_t *info = PRIV(link->priv);
- DEBUG(0, "pcnet_release(0x%p)\n", link);
+ DEBUG(0, "pcnet_release(0x%p)\n", link);
- if (info->flags & USE_SHMEM) {
- iounmap(info->base);
- pcmcia_release_window(link->win);
- }
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
+ if (info->flags & USE_SHMEM)
+ iounmap(info->base);
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
}
/*======================================================================
@@ -756,34 +731,24 @@ static void pcnet_release(dev_link_t *link)
======================================================================*/
-static int pcnet_suspend(struct pcmcia_device *p_dev)
+static int pcnet_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if (link->open)
- netif_device_detach(dev);
- pcmcia_release_configuration(link->handle);
- }
+ if (link->open)
+ netif_device_detach(dev);
return 0;
}
-static int pcnet_resume(struct pcmcia_device *p_dev)
+static int pcnet_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- if (link->open) {
- pcnet_reset_8390(dev);
- NS8390_init(dev, 1);
- netif_device_attach(dev);
- }
+ if (link->open) {
+ pcnet_reset_8390(dev);
+ NS8390_init(dev, 1);
+ netif_device_attach(dev);
}
return 0;
@@ -1023,11 +988,11 @@ static void mii_phy_probe(struct net_device *dev)
static int pcnet_open(struct net_device *dev)
{
pcnet_dev_t *info = PRIV(dev);
- dev_link_t *link = &info->link;
-
+ struct pcmcia_device *link = info->p_dev;
+
DEBUG(2, "pcnet_open('%s')\n", dev->name);
- if (!DEV_OK(link))
+ if (!pcmcia_dev_present(link))
return -ENODEV;
link->open++;
@@ -1051,7 +1016,7 @@ static int pcnet_open(struct net_device *dev)
static int pcnet_close(struct net_device *dev)
{
pcnet_dev_t *info = PRIV(dev);
- dev_link_t *link = &info->link;
+ struct pcmcia_device *link = info->p_dev;
DEBUG(2, "pcnet_close('%s')\n", dev->name);
@@ -1429,7 +1394,7 @@ static void dma_block_output(struct net_device *dev, int count,
/*====================================================================*/
-static int setup_dma_config(dev_link_t *link, int start_pg,
+static int setup_dma_config(struct pcmcia_device *link, int start_pg,
int stop_pg)
{
struct net_device *dev = link->priv;
@@ -1532,7 +1497,7 @@ static void shmem_block_output(struct net_device *dev, int count,
/*====================================================================*/
-static int setup_shmem_window(dev_link_t *link, int start_pg,
+static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
int stop_pg, int cm_offset)
{
struct net_device *dev = link->priv;
@@ -1554,7 +1519,7 @@ static int setup_shmem_window(dev_link_t *link, int start_pg,
req.Attributes |= WIN_USE_WAIT;
req.Base = 0; req.Size = window_size;
req.AccessSpeed = mem_speed;
- CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &link->win));
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win));
mem.CardOffset = (start_pg << 8) + cm_offset;
offset = mem.CardOffset % window_size;
@@ -1595,7 +1560,7 @@ static int setup_shmem_window(dev_link_t *link, int start_pg,
return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
return 1;
}
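In the data-path entry points the state-flag checks disappear as well: DEV_OK(link) becomes pcmcia_dev_present(), reached through the private structure's p_dev back-pointer rather than an embedded dev_link_t. A sketch under the same assumptions as the earlier examples:

static int example_open(struct net_device *dev)
{
	struct example_private *priv = netdev_priv(dev);
	struct pcmcia_device *link = priv->p_dev;

	if (!pcmcia_dev_present(link))	/* card removed or not configured */
		return -ENODEV;

	link->open++;			/* open count still lives on the pcmcia_device */
	netif_start_queue(dev);
	return 0;
}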
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 8839c4faafd6e..e74bf5014ef6b 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -49,6 +49,7 @@
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
+#include <pcmcia/ss.h>
#include <asm/io.h>
#include <asm/system.h>
@@ -103,7 +104,7 @@ static const char *version =
#define MEMORY_WAIT_TIME 8
struct smc_private {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
spinlock_t lock;
u_short manfid;
u_short cardid;
@@ -278,8 +279,8 @@ enum RxCfg { RxAllMulti = 0x0004, RxPromisc = 0x0002,
/*====================================================================*/
static void smc91c92_detach(struct pcmcia_device *p_dev);
-static void smc91c92_config(dev_link_t *link);
-static void smc91c92_release(dev_link_t *link);
+static int smc91c92_config(struct pcmcia_device *link);
+static void smc91c92_release(struct pcmcia_device *link);
static int smc_open(struct net_device *dev);
static int smc_close(struct net_device *dev);
@@ -308,10 +309,9 @@ static struct ethtool_ops ethtool_ops;
======================================================================*/
-static int smc91c92_attach(struct pcmcia_device *p_dev)
+static int smc91c92_probe(struct pcmcia_device *link)
{
struct smc_private *smc;
- dev_link_t *link;
struct net_device *dev;
DEBUG(0, "smc91c92_attach()\n");
@@ -321,7 +321,7 @@ static int smc91c92_attach(struct pcmcia_device *p_dev)
if (!dev)
return -ENOMEM;
smc = netdev_priv(dev);
- link = &smc->link;
+ smc->p_dev = link;
link->priv = dev;
spin_lock_init(&smc->lock);
@@ -333,7 +333,6 @@ static int smc91c92_attach(struct pcmcia_device *p_dev)
link->irq.Handler = &smc_interrupt;
link->irq.Instance = dev;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
/* The SMC91c92-specific entries in the device structure. */
@@ -357,13 +356,7 @@ static int smc91c92_attach(struct pcmcia_device *p_dev)
smc->mii_if.phy_id_mask = 0x1f;
smc->mii_if.reg_num_mask = 0x1f;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- smc91c92_config(link);
-
- return 0;
+ return smc91c92_config(link);
} /* smc91c92_attach */
/*======================================================================
@@ -375,18 +368,16 @@ static int smc91c92_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void smc91c92_detach(struct pcmcia_device *p_dev)
+static void smc91c92_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
DEBUG(0, "smc91c92_detach(0x%p)\n", link);
- if (link->dev)
+ if (link->dev_node)
unregister_netdev(dev);
- if (link->state & DEV_CONFIG)
- smc91c92_release(link);
+ smc91c92_release(link);
free_netdev(dev);
} /* smc91c92_detach */
@@ -414,7 +405,7 @@ static int cvt_ascii_address(struct net_device *dev, char *s)
/*====================================================================*/
-static int first_tuple(client_handle_t handle, tuple_t *tuple,
+static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i;
@@ -425,7 +416,7 @@ static int first_tuple(client_handle_t handle, tuple_t *tuple,
return pcmcia_parse_tuple(handle, tuple, parse);
}
-static int next_tuple(client_handle_t handle, tuple_t *tuple,
+static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple,
cisparse_t *parse)
{
int i;
@@ -447,7 +438,7 @@ static int next_tuple(client_handle_t handle, tuple_t *tuple,
======================================================================*/
-static int mhz_3288_power(dev_link_t *link)
+static int mhz_3288_power(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
@@ -469,7 +460,7 @@ static int mhz_3288_power(dev_link_t *link)
return 0;
}
-static int mhz_mfc_config(dev_link_t *link)
+static int mhz_mfc_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
@@ -504,7 +495,7 @@ static int mhz_mfc_config(dev_link_t *link)
tuple->TupleDataMax = 255;
tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
- i = first_tuple(link->handle, tuple, parse);
+ i = first_tuple(link, tuple, parse);
/* The Megahertz combo cards have modem-like CIS entries, so
we have to explicitly try a bunch of port combinations. */
while (i == CS_SUCCESS) {
@@ -513,11 +504,11 @@ static int mhz_mfc_config(dev_link_t *link)
for (k = 0; k < 0x400; k += 0x10) {
if (k & 0x80) continue;
link->io.BasePort1 = k ^ 0x300;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) break;
}
if (i == CS_SUCCESS) break;
- i = next_tuple(link->handle, tuple, parse);
+ i = next_tuple(link, tuple, parse);
}
if (i != CS_SUCCESS)
goto free_cfg_mem;
@@ -527,7 +518,7 @@ static int mhz_mfc_config(dev_link_t *link)
req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
req.Base = req.Size = 0;
req.AccessSpeed = 0;
- i = pcmcia_request_window(&link->handle, &req, &link->win);
+ i = pcmcia_request_window(&link, &req, &link->win);
if (i != CS_SUCCESS)
goto free_cfg_mem;
smc->base = ioremap(req.Base, req.Size);
@@ -546,9 +537,8 @@ free_cfg_mem:
return i;
}
-static int mhz_setup(dev_link_t *link)
+static int mhz_setup(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
struct net_device *dev = link->priv;
struct smc_cfg_mem *cfg_mem;
tuple_t *tuple;
@@ -571,13 +561,13 @@ static int mhz_setup(dev_link_t *link)
/* Read the station address from the CIS. It is stored as the last
(fourth) string in the Version 1 Version/ID tuple. */
tuple->DesiredTuple = CISTPL_VERS_1;
- if (first_tuple(handle, tuple, parse) != CS_SUCCESS) {
+ if (first_tuple(link, tuple, parse) != CS_SUCCESS) {
rc = -1;
goto free_cfg_mem;
}
/* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
- if (next_tuple(handle, tuple, parse) != CS_SUCCESS)
- first_tuple(handle, tuple, parse);
+ if (next_tuple(link, tuple, parse) != CS_SUCCESS)
+ first_tuple(link, tuple, parse);
if (parse->version_1.ns > 3) {
station_addr = parse->version_1.str + parse->version_1.ofs[3];
if (cvt_ascii_address(dev, station_addr) == 0) {
@@ -588,11 +578,11 @@ static int mhz_setup(dev_link_t *link)
/* Another possibility: for the EM3288, in a special tuple */
tuple->DesiredTuple = 0x81;
- if (pcmcia_get_first_tuple(handle, tuple) != CS_SUCCESS) {
+ if (pcmcia_get_first_tuple(link, tuple) != CS_SUCCESS) {
rc = -1;
goto free_cfg_mem;
}
- if (pcmcia_get_tuple_data(handle, tuple) != CS_SUCCESS) {
+ if (pcmcia_get_tuple_data(link, tuple) != CS_SUCCESS) {
rc = -1;
goto free_cfg_mem;
}
@@ -616,7 +606,7 @@ free_cfg_mem:
======================================================================*/
-static void mot_config(dev_link_t *link)
+static void mot_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
@@ -637,7 +627,7 @@ static void mot_config(dev_link_t *link)
mdelay(100);
}
-static int mot_setup(dev_link_t *link)
+static int mot_setup(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
kio_addr_t ioaddr = dev->base_addr;
@@ -671,7 +661,7 @@ static int mot_setup(dev_link_t *link)
/*====================================================================*/
-static int smc_config(dev_link_t *link)
+static int smc_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
struct smc_cfg_mem *cfg_mem;
@@ -696,16 +686,16 @@ static int smc_config(dev_link_t *link)
tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
link->io.NumPorts1 = 16;
- i = first_tuple(link->handle, tuple, parse);
+ i = first_tuple(link, tuple, parse);
while (i != CS_NO_MORE_ITEMS) {
if (i == CS_SUCCESS) {
link->conf.ConfigIndex = cf->index;
link->io.BasePort1 = cf->io.win[0].base;
link->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) break;
}
- i = next_tuple(link->handle, tuple, parse);
+ i = next_tuple(link, tuple, parse);
}
if (i == CS_SUCCESS)
dev->base_addr = link->io.BasePort1;
@@ -714,9 +704,8 @@ static int smc_config(dev_link_t *link)
return i;
}
-static int smc_setup(dev_link_t *link)
+static int smc_setup(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
struct net_device *dev = link->priv;
struct smc_cfg_mem *cfg_mem;
tuple_t *tuple;
@@ -739,11 +728,11 @@ static int smc_setup(dev_link_t *link)
/* Check for a LAN function extension tuple */
tuple->DesiredTuple = CISTPL_FUNCE;
- i = first_tuple(handle, tuple, parse);
+ i = first_tuple(link, tuple, parse);
while (i == CS_SUCCESS) {
if (parse->funce.type == CISTPL_FUNCE_LAN_NODE_ID)
break;
- i = next_tuple(handle, tuple, parse);
+ i = next_tuple(link, tuple, parse);
}
if (i == CS_SUCCESS) {
node_id = (cistpl_lan_node_id_t *)parse->funce.data;
@@ -756,7 +745,7 @@ static int smc_setup(dev_link_t *link)
}
/* Try the third string in the Version 1 Version/ID tuple. */
tuple->DesiredTuple = CISTPL_VERS_1;
- if (first_tuple(handle, tuple, parse) != CS_SUCCESS) {
+ if (first_tuple(link, tuple, parse) != CS_SUCCESS) {
rc = -1;
goto free_cfg_mem;
}
@@ -774,7 +763,7 @@ free_cfg_mem:
/*====================================================================*/
-static int osi_config(dev_link_t *link)
+static int osi_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
static const kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };
@@ -794,22 +783,21 @@ static int osi_config(dev_link_t *link)
for (i = j = 0; j < 4; j++) {
link->io.BasePort2 = com[j];
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) break;
}
if (i != CS_SUCCESS) {
/* Fallback: turn off hard decode */
link->conf.ConfigIndex = 0x03;
link->io.NumPorts2 = 0;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
}
dev->base_addr = link->io.BasePort1 + 0x10;
return i;
}
-static int osi_setup(dev_link_t *link, u_short manfid, u_short cardid)
+static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
{
- client_handle_t handle = link->handle;
struct net_device *dev = link->priv;
struct smc_cfg_mem *cfg_mem;
tuple_t *tuple;
@@ -830,12 +818,12 @@ static int osi_setup(dev_link_t *link, u_short manfid, u_short cardid)
/* Read the station address from tuple 0x90, subtuple 0x04 */
tuple->DesiredTuple = 0x90;
- i = pcmcia_get_first_tuple(handle, tuple);
+ i = pcmcia_get_first_tuple(link, tuple);
while (i == CS_SUCCESS) {
- i = pcmcia_get_tuple_data(handle, tuple);
+ i = pcmcia_get_tuple_data(link, tuple);
if ((i != CS_SUCCESS) || (buf[0] == 0x04))
break;
- i = pcmcia_get_next_tuple(handle, tuple);
+ i = pcmcia_get_next_tuple(link, tuple);
}
if (i != CS_SUCCESS) {
rc = -1;
@@ -868,57 +856,47 @@ free_cfg_mem:
return rc;
}
-static int smc91c92_suspend(struct pcmcia_device *p_dev)
+static int smc91c92_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if (link->open)
- netif_device_detach(dev);
- pcmcia_release_configuration(link->handle);
- }
+ if (link->open)
+ netif_device_detach(dev);
return 0;
}
-static int smc91c92_resume(struct pcmcia_device *p_dev)
+static int smc91c92_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
int i;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if ((smc->manfid == MANFID_MEGAHERTZ) &&
- (smc->cardid == PRODID_MEGAHERTZ_EM3288))
- mhz_3288_power(link);
- pcmcia_request_configuration(link->handle, &link->conf);
- if (smc->manfid == MANFID_MOTOROLA)
- mot_config(link);
- if ((smc->manfid == MANFID_OSITECH) &&
- (smc->cardid != PRODID_OSITECH_SEVEN)) {
- /* Power up the card and enable interrupts */
- set_bits(0x0300, dev->base_addr-0x10+OSITECH_AUI_PWR);
- set_bits(0x0300, dev->base_addr-0x10+OSITECH_RESET_ISR);
- }
- if (((smc->manfid == MANFID_OSITECH) &&
- (smc->cardid == PRODID_OSITECH_SEVEN)) ||
- ((smc->manfid == MANFID_PSION) &&
- (smc->cardid == PRODID_PSION_NET100))) {
- /* Download the Seven of Diamonds firmware */
- for (i = 0; i < sizeof(__Xilinx7OD); i++) {
- outb(__Xilinx7OD[i], link->io.BasePort1+2);
- udelay(50);
- }
- }
- if (link->open) {
- smc_reset(dev);
- netif_device_attach(dev);
+ if ((smc->manfid == MANFID_MEGAHERTZ) &&
+ (smc->cardid == PRODID_MEGAHERTZ_EM3288))
+ mhz_3288_power(link);
+ if (smc->manfid == MANFID_MOTOROLA)
+ mot_config(link);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ /* Power up the card and enable interrupts */
+ set_bits(0x0300, dev->base_addr-0x10+OSITECH_AUI_PWR);
+ set_bits(0x0300, dev->base_addr-0x10+OSITECH_RESET_ISR);
+ }
+ if (((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid == PRODID_OSITECH_SEVEN)) ||
+ ((smc->manfid == MANFID_PSION) &&
+ (smc->cardid == PRODID_PSION_NET100))) {
+ /* Download the Seven of Diamonds firmware */
+ for (i = 0; i < sizeof(__Xilinx7OD); i++) {
+ outb(__Xilinx7OD[i], link->io.BasePort1+2);
+ udelay(50);
}
}
+ if (link->open) {
+ smc_reset(dev);
+ netif_device_attach(dev);
+ }
return 0;
}
@@ -931,7 +909,7 @@ static int smc91c92_resume(struct pcmcia_device *p_dev)
======================================================================*/
-static int check_sig(dev_link_t *link)
+static int check_sig(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
kio_addr_t ioaddr = dev->base_addr;
@@ -964,13 +942,15 @@ static int check_sig(dev_link_t *link)
}
if (width) {
- printk(KERN_INFO "smc91c92_cs: using 8-bit IO window.\n");
- smc91c92_suspend(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- pcmcia_request_io(link->handle, &link->io);
- smc91c92_resume(link->handle);
- return check_sig(link);
+ modconf_t mod = {
+ .Attributes = CONF_IO_CHANGE_WIDTH,
+ };
+ printk(KERN_INFO "smc91c92_cs: using 8-bit IO window.\n");
+
+ smc91c92_suspend(link);
+ pcmcia_modify_configuration(link, &mod);
+ smc91c92_resume(link);
+ return check_sig(link);
}
return -ENODEV;
}
@@ -984,11 +964,10 @@ static int check_sig(dev_link_t *link)
======================================================================*/
#define CS_EXIT_TEST(ret, svc, label) \
-if (ret != CS_SUCCESS) { cs_error(link->handle, svc, ret); goto label; }
+if (ret != CS_SUCCESS) { cs_error(link, svc, ret); goto label; }
-static void smc91c92_config(dev_link_t *link)
+static int smc91c92_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
struct smc_cfg_mem *cfg_mem;
@@ -1015,21 +994,18 @@ static void smc91c92_config(dev_link_t *link)
tuple->TupleDataMax = 64;
tuple->DesiredTuple = CISTPL_CONFIG;
- i = first_tuple(handle, tuple, parse);
+ i = first_tuple(link, tuple, parse);
CS_EXIT_TEST(i, ParseTuple, config_failed);
link->conf.ConfigBase = parse->config.base;
link->conf.Present = parse->config.rmask[0];
tuple->DesiredTuple = CISTPL_MANFID;
tuple->Attributes = TUPLE_RETURN_COMMON;
- if (first_tuple(handle, tuple, parse) == CS_SUCCESS) {
+ if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
smc->manfid = parse->manfid.manf;
smc->cardid = parse->manfid.card;
}
- /* Configure card */
- link->state |= DEV_CONFIG;
-
if ((smc->manfid == MANFID_OSITECH) &&
(smc->cardid != PRODID_OSITECH_SEVEN)) {
i = osi_config(link);
@@ -1043,9 +1019,9 @@ static void smc91c92_config(dev_link_t *link)
}
CS_EXIT_TEST(i, RequestIO, config_failed);
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
CS_EXIT_TEST(i, RequestIRQ, config_failed);
- i = pcmcia_request_configuration(link->handle, &link->conf);
+ i = pcmcia_request_configuration(link, &link->conf);
CS_EXIT_TEST(i, RequestConfiguration, config_failed);
if (smc->manfid == MANFID_MOTOROLA)
@@ -1124,13 +1100,12 @@ static void smc91c92_config(dev_link_t *link)
SMC_SELECT_BANK(0);
}
- link->dev = &smc->node;
- link->state &= ~DEV_CONFIG_PENDING;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ link->dev_node = &smc->node;
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
if (register_netdev(dev) != 0) {
printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
- link->dev = NULL;
+ link->dev_node = NULL;
goto config_undo;
}
@@ -1160,15 +1135,14 @@ static void smc91c92_config(dev_link_t *link)
}
}
kfree(cfg_mem);
- return;
+ return 0;
config_undo:
unregister_netdev(dev);
config_failed: /* CS_EXIT_TEST() calls jump to here... */
smc91c92_release(link);
- link->state &= ~DEV_CONFIG_PENDING;
kfree(cfg_mem);
-
+ return -ENODEV;
} /* smc91c92_config */
/*======================================================================
@@ -1179,22 +1153,15 @@ config_failed: /* CS_EXIT_TEST() calls jump to here... */
======================================================================*/
-static void smc91c92_release(dev_link_t *link)
+static void smc91c92_release(struct pcmcia_device *link)
{
-
- DEBUG(0, "smc91c92_release(0x%p)\n", link);
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
- if (link->win) {
- struct net_device *dev = link->priv;
- struct smc_private *smc = netdev_priv(dev);
- iounmap(smc->base);
- pcmcia_release_window(link->win);
- }
-
- link->state &= ~DEV_CONFIG;
+ DEBUG(0, "smc91c92_release(0x%p)\n", link);
+ if (link->win) {
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ iounmap(smc->base);
+ }
+ pcmcia_disable_device(link);
}
/*======================================================================
@@ -1283,7 +1250,7 @@ static void smc_dump(struct net_device *dev)
static int smc_open(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
- dev_link_t *link = &smc->link;
+ struct pcmcia_device *link = smc->p_dev;
#ifdef PCMCIA_DEBUG
DEBUG(0, "%s: smc_open(%p), ID/Window %4.4x.\n",
@@ -1292,7 +1259,7 @@ static int smc_open(struct net_device *dev)
#endif
/* Check that the PCMCIA card is still here. */
- if (!DEV_OK(link))
+ if (!pcmcia_dev_present(link))
return -ENODEV;
/* Physical device present signature. */
if (check_sig(link) < 0) {
@@ -1320,7 +1287,7 @@ static int smc_open(struct net_device *dev)
static int smc_close(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
- dev_link_t *link = &smc->link;
+ struct pcmcia_device *link = smc->p_dev;
kio_addr_t ioaddr = dev->base_addr;
DEBUG(0, "%s: smc_close(), status %4.4x.\n",
@@ -2311,7 +2278,7 @@ static struct pcmcia_driver smc91c92_cs_driver = {
.drv = {
.name = "smc91c92_cs",
},
- .probe = smc91c92_attach,
+ .probe = smc91c92_probe,
.remove = smc91c92_detach,
.id_table = smc91c92_ids,
.suspend = smc91c92_suspend,
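smc91c92's check_sig() is the one spot where the conversion is more than mechanical: the old code released and re-requested the I/O window (wrapped in a fake suspend/resume) just to drop to 8-bit accesses, whereas the new code switches the width of the already-configured window in place with pcmcia_modify_configuration() and CONF_IO_CHANGE_WIDTH. A sketch of that fallback, built on the example_* helpers from the earlier sketches:

static int example_narrow_io(struct pcmcia_device *link)
{
	modconf_t mod = {
		.Attributes = CONF_IO_CHANGE_WIDTH,
	};

	example_suspend(link);			/* detach the netdev around the change */
	pcmcia_modify_configuration(link, &mod);
	example_resume(link);
	return 0;				/* caller then re-checks the chip signature */
}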
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index eed496803fe40..71f45056a70ce 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -289,9 +289,9 @@ static void mii_wr(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg,
* and ejection events. They are invoked from the event handler.
*/
-static int has_ce2_string(dev_link_t * link);
-static void xirc2ps_config(dev_link_t * link);
-static void xirc2ps_release(dev_link_t * link);
+static int has_ce2_string(struct pcmcia_device * link);
+static int xirc2ps_config(struct pcmcia_device * link);
+static void xirc2ps_release(struct pcmcia_device * link);
/****************
* The attach() and detach() entry points are used to create and destroy
@@ -313,10 +313,10 @@ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs
/****************
* A linked list of "instances" of the device. Each actual
* PCMCIA card corresponds to one device instance, and is described
- * by one dev_link_t structure (defined in ds.h).
+ * by one struct pcmcia_device structure (defined in ds.h).
*
* You may not want to use a linked list for this -- for example, the
- * memory card driver uses an array of dev_link_t pointers, where minor
+ * memory card driver uses an array of struct pcmcia_device pointers, where minor
* device numbers are used to derive the corresponding array index.
*/
@@ -326,13 +326,13 @@ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs
* example, ethernet cards, modems). In other cases, there may be
* many actual or logical devices (SCSI adapters, memory cards with
* multiple partitions). The dev_node_t structures need to be kept
- * in a linked list starting at the 'dev' field of a dev_link_t
+ * in a linked list starting at the 'dev_node' field of a struct pcmcia_device
* structure. We allocate them in the card's private data structure,
* because they generally can't be allocated dynamically.
*/
typedef struct local_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct net_device_stats stats;
int card_type;
@@ -355,7 +355,7 @@ static void do_tx_timeout(struct net_device *dev);
static struct net_device_stats *do_get_stats(struct net_device *dev);
static void set_addresses(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static int set_card_type(dev_link_t *link, const void *s);
+static int set_card_type(struct pcmcia_device *link, const void *s);
static int do_config(struct net_device *dev, struct ifmap *map);
static int do_open(struct net_device *dev);
static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -368,7 +368,7 @@ static int do_stop(struct net_device *dev);
/*=============== Helper functions =========================*/
static int
-first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
{
int err;
@@ -379,7 +379,7 @@ first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
}
static int
-next_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
{
int err;
@@ -553,9 +553,8 @@ mii_wr(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len)
*/
static int
-xirc2ps_attach(struct pcmcia_device *p_dev)
+xirc2ps_probe(struct pcmcia_device *link)
{
- dev_link_t *link;
struct net_device *dev;
local_info_t *local;
@@ -566,12 +565,11 @@ xirc2ps_attach(struct pcmcia_device *p_dev)
if (!dev)
return -ENOMEM;
local = netdev_priv(dev);
- link = &local->link;
+ local->p_dev = link;
link->priv = dev;
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
link->conf.Present = PRESENT_OPTION;
@@ -593,13 +591,7 @@ xirc2ps_attach(struct pcmcia_device *p_dev)
dev->watchdog_timeo = TX_TIMEOUT;
#endif
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- xirc2ps_config(link);
-
- return 0;
+ return xirc2ps_config(link);
} /* xirc2ps_attach */
/****************
@@ -610,18 +602,16 @@ xirc2ps_attach(struct pcmcia_device *p_dev)
*/
static void
-xirc2ps_detach(struct pcmcia_device *p_dev)
+xirc2ps_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
DEBUG(0, "detach(0x%p)\n", link);
- if (link->dev)
+ if (link->dev_node)
unregister_netdev(dev);
- if (link->state & DEV_CONFIG)
- xirc2ps_release(link);
+ xirc2ps_release(link);
free_netdev(dev);
} /* xirc2ps_detach */
@@ -645,7 +635,7 @@ xirc2ps_detach(struct pcmcia_device *p_dev)
*
*/
static int
-set_card_type(dev_link_t *link, const void *s)
+set_card_type(struct pcmcia_device *link, const void *s)
{
struct net_device *dev = link->priv;
local_info_t *local = netdev_priv(dev);
@@ -714,9 +704,8 @@ set_card_type(dev_link_t *link, const void *s)
* Returns: true if this is a CE2
*/
static int
-has_ce2_string(dev_link_t * link)
+has_ce2_string(struct pcmcia_device * link)
{
- client_handle_t handle = link->handle;
tuple_t tuple;
cisparse_t parse;
u_char buf[256];
@@ -726,7 +715,7 @@ has_ce2_string(dev_link_t * link)
tuple.TupleDataMax = 254;
tuple.TupleOffset = 0;
tuple.DesiredTuple = CISTPL_VERS_1;
- if (!first_tuple(handle, &tuple, &parse) && parse.version_1.ns > 2) {
+ if (!first_tuple(link, &tuple, &parse) && parse.version_1.ns > 2) {
if (strstr(parse.version_1.str + parse.version_1.ofs[2], "CE2"))
return 1;
}
@@ -738,10 +727,9 @@ has_ce2_string(dev_link_t * link)
* is received, to configure the PCMCIA socket, and to make the
* ethernet device available to the system.
*/
-static void
-xirc2ps_config(dev_link_t * link)
+static int
+xirc2ps_config(struct pcmcia_device * link)
{
- client_handle_t handle = link->handle;
struct net_device *dev = link->priv;
local_info_t *local = netdev_priv(dev);
tuple_t tuple;
@@ -767,7 +755,7 @@ xirc2ps_config(dev_link_t * link)
/* Is this a valid card */
tuple.DesiredTuple = CISTPL_MANFID;
- if ((err=first_tuple(handle, &tuple, &parse))) {
+ if ((err=first_tuple(link, &tuple, &parse))) {
printk(KNOT_XIRC "manfid not found in CIS\n");
goto failure;
}
@@ -803,15 +791,15 @@ xirc2ps_config(dev_link_t * link)
/* get configuration stuff */
tuple.DesiredTuple = CISTPL_CONFIG;
- if ((err=first_tuple(handle, &tuple, &parse)))
+ if ((err=first_tuple(link, &tuple, &parse)))
goto cis_error;
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
/* get the ethernet address from the CIS */
tuple.DesiredTuple = CISTPL_FUNCE;
- for (err = first_tuple(handle, &tuple, &parse); !err;
- err = next_tuple(handle, &tuple, &parse)) {
+ for (err = first_tuple(link, &tuple, &parse); !err;
+ err = next_tuple(link, &tuple, &parse)) {
/* Once I saw two CISTPL_FUNCE_LAN_NODE_ID entries:
* the first one with a length of zero the second correct -
* so I skip all entries with length 0 */
@@ -821,8 +809,8 @@ xirc2ps_config(dev_link_t * link)
}
if (err) { /* not found: try to get the node-id from tuple 0x89 */
tuple.DesiredTuple = 0x89; /* data layout looks like tuple 0x22 */
- if ((err = pcmcia_get_first_tuple(handle, &tuple)) == 0 &&
- (err = pcmcia_get_tuple_data(handle, &tuple)) == 0) {
+ if ((err = pcmcia_get_first_tuple(link, &tuple)) == 0 &&
+ (err = pcmcia_get_tuple_data(link, &tuple)) == 0) {
if (tuple.TupleDataLen == 8 && *buf == CISTPL_FUNCE_LAN_NODE_ID)
memcpy(&parse, buf, 8);
else
@@ -831,8 +819,8 @@ xirc2ps_config(dev_link_t * link)
}
if (err) { /* another try (James Lehmer's CE2 version 4.1)*/
tuple.DesiredTuple = CISTPL_FUNCE;
- for (err = first_tuple(handle, &tuple, &parse); !err;
- err = next_tuple(handle, &tuple, &parse)) {
+ for (err = first_tuple(link, &tuple, &parse); !err;
+ err = next_tuple(link, &tuple, &parse)) {
if (parse.funce.type == 0x02 && parse.funce.data[0] == 1
&& parse.funce.data[1] == 6 && tuple.TupleDataLen == 13) {
buf[1] = 4;
@@ -853,9 +841,6 @@ xirc2ps_config(dev_link_t * link)
for (i=0; i < 6; i++)
dev->dev_addr[i] = node_id->id[i];
- /* Configure card */
- link->state |= DEV_CONFIG;
-
link->io.IOAddrLines =10;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
link->irq.Attributes = IRQ_HANDLE_PRESENT;
@@ -875,14 +860,14 @@ xirc2ps_config(dev_link_t * link)
* Ethernet port */
link->io.NumPorts1 = 16; /* no Mako stuff anymore */
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- for (err = first_tuple(handle, &tuple, &parse); !err;
- err = next_tuple(handle, &tuple, &parse)) {
+ for (err = first_tuple(link, &tuple, &parse); !err;
+ err = next_tuple(link, &tuple, &parse)) {
if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8) {
for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
link->conf.ConfigIndex = cf->index ;
link->io.BasePort2 = cf->io.win[0].base;
link->io.BasePort1 = ioaddr;
- if (!(err=pcmcia_request_io(link->handle, &link->io)))
+ if (!(err=pcmcia_request_io(link, &link->io)))
goto port_found;
}
}
@@ -896,15 +881,15 @@ xirc2ps_config(dev_link_t * link)
*/
for (pass=0; pass < 2; pass++) {
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- for (err = first_tuple(handle, &tuple, &parse); !err;
- err = next_tuple(handle, &tuple, &parse)){
+ for (err = first_tuple(link, &tuple, &parse); !err;
+ err = next_tuple(link, &tuple, &parse)){
if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8){
link->conf.ConfigIndex = cf->index ;
link->io.BasePort2 = cf->io.win[0].base;
link->io.BasePort1 = link->io.BasePort2
+ (pass ? (cf->index & 0x20 ? -24:8)
: (cf->index & 0x20 ? 8:-24));
- if (!(err=pcmcia_request_io(link->handle, &link->io)))
+ if (!(err=pcmcia_request_io(link, &link->io)))
goto port_found;
}
}
@@ -919,12 +904,12 @@ xirc2ps_config(dev_link_t * link)
link->io.NumPorts1 = 16;
for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
link->io.BasePort1 = ioaddr;
- if (!(err=pcmcia_request_io(link->handle, &link->io)))
+ if (!(err=pcmcia_request_io(link, &link->io)))
goto port_found;
}
link->io.BasePort1 = 0; /* let CS decide */
- if ((err=pcmcia_request_io(link->handle, &link->io))) {
- cs_error(link->handle, RequestIO, err);
+ if ((err=pcmcia_request_io(link, &link->io))) {
+ cs_error(link, RequestIO, err);
goto config_error;
}
}
@@ -936,8 +921,8 @@ xirc2ps_config(dev_link_t * link)
* Now allocate an interrupt line. Note that this does not
* actually assign a handler to the interrupt.
*/
- if ((err=pcmcia_request_irq(link->handle, &link->irq))) {
- cs_error(link->handle, RequestIRQ, err);
+ if ((err=pcmcia_request_irq(link, &link->irq))) {
+ cs_error(link, RequestIRQ, err);
goto config_error;
}
@@ -945,8 +930,8 @@ xirc2ps_config(dev_link_t * link)
* This actually configures the PCMCIA socket -- setting up
* the I/O windows and the interrupt mapping.
*/
- if ((err=pcmcia_request_configuration(link->handle, &link->conf))) {
- cs_error(link->handle, RequestConfiguration, err);
+ if ((err=pcmcia_request_configuration(link, &link->conf))) {
+ cs_error(link, RequestConfiguration, err);
goto config_error;
}
@@ -963,15 +948,15 @@ xirc2ps_config(dev_link_t * link)
reg.Action = CS_WRITE;
reg.Offset = CISREG_IOBASE_0;
reg.Value = link->io.BasePort2 & 0xff;
- if ((err = pcmcia_access_configuration_register(link->handle, &reg))) {
- cs_error(link->handle, AccessConfigurationRegister, err);
+ if ((err = pcmcia_access_configuration_register(link, &reg))) {
+ cs_error(link, AccessConfigurationRegister, err);
goto config_error;
}
reg.Action = CS_WRITE;
reg.Offset = CISREG_IOBASE_1;
reg.Value = (link->io.BasePort2 >> 8) & 0xff;
- if ((err = pcmcia_access_configuration_register(link->handle, &reg))) {
- cs_error(link->handle, AccessConfigurationRegister, err);
+ if ((err = pcmcia_access_configuration_register(link, &reg))) {
+ cs_error(link, AccessConfigurationRegister, err);
goto config_error;
}
@@ -982,15 +967,15 @@ xirc2ps_config(dev_link_t * link)
req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
req.Base = req.Size = 0;
req.AccessSpeed = 0;
- if ((err = pcmcia_request_window(&link->handle, &req, &link->win))) {
- cs_error(link->handle, RequestWindow, err);
+ if ((err = pcmcia_request_window(&link, &req, &link->win))) {
+ cs_error(link, RequestWindow, err);
goto config_error;
}
local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800;
mem.CardOffset = 0x0;
mem.Page = 0;
if ((err = pcmcia_map_mem_page(link->win, &mem))) {
- cs_error(link->handle, MapMemPage, err);
+ cs_error(link, MapMemPage, err);
goto config_error;
}
@@ -1050,13 +1035,12 @@ xirc2ps_config(dev_link_t * link)
if (local->dingo)
do_reset(dev, 1); /* a kludge to make the cem56 work */
- link->dev = &local->node;
- link->state &= ~DEV_CONFIG_PENDING;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ link->dev_node = &local->node;
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
if ((err=register_netdev(dev))) {
printk(KNOT_XIRC "register_netdev() failed\n");
- link->dev = NULL;
+ link->dev_node = NULL;
goto config_error;
}
@@ -1069,17 +1053,16 @@ xirc2ps_config(dev_link_t * link)
printk("%c%02X", i?':':' ', dev->dev_addr[i]);
printk("\n");
- return;
+ return 0;
config_error:
- link->state &= ~DEV_CONFIG_PENDING;
xirc2ps_release(link);
- return;
+ return -ENODEV;
cis_error:
printk(KNOT_XIRC "unable to parse CIS\n");
failure:
- link->state &= ~DEV_CONFIG_PENDING;
+ return -ENODEV;
} /* xirc2ps_config */
/****************
@@ -1088,57 +1071,41 @@ xirc2ps_config(dev_link_t * link)
* still open, this will be postponed until it is closed.
*/
static void
-xirc2ps_release(dev_link_t *link)
+xirc2ps_release(struct pcmcia_device *link)
{
+ DEBUG(0, "release(0x%p)\n", link);
- DEBUG(0, "release(0x%p)\n", link);
-
- if (link->win) {
- struct net_device *dev = link->priv;
- local_info_t *local = netdev_priv(dev);
- if (local->dingo)
- iounmap(local->dingo_ccr - 0x0800);
- pcmcia_release_window(link->win);
- }
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
-
+ if (link->win) {
+ struct net_device *dev = link->priv;
+ local_info_t *local = netdev_priv(dev);
+ if (local->dingo)
+ iounmap(local->dingo_ccr - 0x0800);
+ }
+ pcmcia_disable_device(link);
} /* xirc2ps_release */
/*====================================================================*/
-static int xirc2ps_suspend(struct pcmcia_device *p_dev)
+static int xirc2ps_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if (link->open) {
- netif_device_detach(dev);
- do_powerdown(dev);
- }
- pcmcia_release_configuration(link->handle);
+ if (link->open) {
+ netif_device_detach(dev);
+ do_powerdown(dev);
}
return 0;
}
-static int xirc2ps_resume(struct pcmcia_device *p_dev)
+static int xirc2ps_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- if (link->open) {
- do_reset(dev,1);
- netif_device_attach(dev);
- }
+ if (link->open) {
+ do_reset(dev,1);
+ netif_device_attach(dev);
}
return 0;
@@ -1552,13 +1519,13 @@ static int
do_open(struct net_device *dev)
{
local_info_t *lp = netdev_priv(dev);
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
DEBUG(0, "do_open(%p)\n", dev);
/* Check that the PCMCIA card is still here. */
/* Physical device present signature. */
- if (!DEV_OK(link))
+ if (!pcmcia_dev_present(link))
return -ENODEV;
/* okay */
@@ -1882,7 +1849,7 @@ do_stop(struct net_device *dev)
{
kio_addr_t ioaddr = dev->base_addr;
local_info_t *lp = netdev_priv(dev);
- dev_link_t *link = &lp->link;
+ struct pcmcia_device *link = lp->p_dev;
DEBUG(0, "do_stop(%p)\n", dev);
@@ -1935,7 +1902,7 @@ static struct pcmcia_driver xirc2ps_cs_driver = {
.drv = {
.name = "xirc2ps_cs",
},
- .probe = xirc2ps_attach,
+ .probe = xirc2ps_probe,
.remove = xirc2ps_detach,
.id_table = xirc2ps_ids,
.suspend = xirc2ps_suspend,
@@ -1973,7 +1940,7 @@ static int __init setup_xirc2ps_cs(char *str)
MAYBE_SET(lockup_hack, 6);
#undef MAYBE_SET
- return 0;
+ return 1;
}
__setup("xirc2ps_cs=", setup_xirc2ps_cs);
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 35b18057fbdd4..45ad036733e21 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -2122,8 +2122,7 @@ static void __devexit starfire_remove_one (struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct netdev_private *np = netdev_priv(dev);
- if (!dev)
- BUG();
+ BUG_ON(!dev);
unregister_netdev(dev);
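The starfire hunk, like the matching ones in tg3, abyss, madgemc and ipw2200 further down, is a mechanical cleanup: an open-coded test followed by BUG() becomes a single BUG_ON(), which also wraps the condition in unlikely() so the hand-written hints in tg3_tx() can go. Schematically:

        /* before: explicit test, sometimes with a manual unlikely() */
        if (!dev)
                BUG();

        /* after: one statement; the macro supplies the unlikely() hint itself */
        BUG_ON(!dev);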
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 964c096448324..0b5358072172a 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.55"
-#define DRV_MODULE_RELDATE "Mar 27, 2006"
+#define DRV_MODULE_VERSION "3.56"
+#define DRV_MODULE_RELDATE "Apr 1, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -497,40 +497,33 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
unsigned long flags;
spin_lock_irqsave(&tp->indirect_lock, flags);
- if (tp->write32 != tg3_write_indirect_reg32) {
- tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
- tw32_f(TG3PCI_MEM_WIN_DATA, val);
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
- /* Always leave this as zero. */
- tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
- } else {
- pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
- pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
-
- /* Always leave this as zero. */
- pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
- }
+ /* Always leave this as zero. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
+static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
+{
+ /* If no workaround is needed, write to mem space directly */
+ if (tp->write32 != tg3_write_indirect_reg32)
+ tw32(NIC_SRAM_WIN_BASE + off, val);
+ else
+ tg3_write_mem(tp, off, val);
+}
+
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
unsigned long flags;
spin_lock_irqsave(&tp->indirect_lock, flags);
- if (tp->write32 != tg3_write_indirect_reg32) {
- tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
- *val = tr32(TG3PCI_MEM_WIN_DATA);
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+ pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
- /* Always leave this as zero. */
- tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
- } else {
- pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
- pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
-
- /* Always leave this as zero. */
- pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
- }
+ /* Always leave this as zero. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
@@ -1374,12 +1367,12 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
}
}
- tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
-
/* Finally, set the new power state. */
pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
udelay(100); /* Delay after power state change */
+ tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+
return 0;
}
@@ -2966,9 +2959,7 @@ static void tg3_tx(struct tg3 *tp)
struct sk_buff *skb = ri->skb;
int i;
- if (unlikely(skb == NULL))
- BUG();
-
+ BUG_ON(skb == NULL);
pci_unmap_single(tp->pdev,
pci_unmap_addr(ri, mapping),
skb_headlen(skb),
@@ -2979,12 +2970,10 @@ static void tg3_tx(struct tg3 *tp)
sw_idx = NEXT_TX(sw_idx);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- if (unlikely(sw_idx == hw_idx))
- BUG();
+ BUG_ON(sw_idx == hw_idx);
ri = &tp->tx_buffers[sw_idx];
- if (unlikely(ri->skb != NULL))
- BUG();
+ BUG_ON(ri->skb != NULL);
pci_unmap_page(tp->pdev,
pci_unmap_addr(ri, mapping),
@@ -4935,9 +4924,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
int i;
- if (offset == TX_CPU_BASE &&
- (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
- BUG();
+ BUG_ON(offset == TX_CPU_BASE &&
+ (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
if (offset == RX_CPU_BASE) {
for (i = 0; i < 10000; i++) {
@@ -6547,11 +6535,11 @@ static void tg3_timer(unsigned long __opaque)
if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
u32 val;
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
- FWCMD_NICDRV_ALIVE2);
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
+ tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
+ FWCMD_NICDRV_ALIVE2);
+ tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
/* 5 seconds timeout */
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
+ tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
val = tr32(GRC_RX_CPU_EVENT);
val |= (1 << 14);
tw32(GRC_RX_CPU_EVENT, val);
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
index e4cfc80b283b7..99c4c1922f190 100644
--- a/drivers/net/tokenring/Kconfig
+++ b/drivers/net/tokenring/Kconfig
@@ -3,7 +3,7 @@
#
menu "Token Ring devices"
- depends on NETDEVICES
+ depends on NETDEVICES && !UML
# So far, we only have PCI, ISA, and MCA token ring devices
config TR
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index 9345e68c451eb..649d8ea354f55 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -438,8 +438,7 @@ static void __devexit abyss_detach (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
- if (!dev)
- BUG();
+ BUG_ON(!dev);
unregister_netdev(dev);
release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT);
free_irq(dev->irq, dev);
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 3a25d191ea4af..19e6f4dfd69ce 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -735,8 +735,7 @@ static int __devexit madgemc_remove(struct device *device)
struct net_local *tp;
struct card_info *card;
- if (!dev)
- BUG();
+ BUG_ON(!dev);
tp = dev->priv;
card = tp->tmspriv;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index f85e301900088..bad09ebdb50b9 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -356,7 +356,7 @@ config PCI_HERMES
config ATMEL
tristate "Atmel at76c50x chipset 802.11b support"
- depends on NET_RADIO
+ depends on NET_RADIO && (PCI || PCMCIA)
select FW_LOADER
select CRC32
---help---
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index a496460ce2249..af0cbb6c5c0c6 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -80,8 +80,8 @@ MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards");
event handler.
*/
-static void airo_config(dev_link_t *link);
-static void airo_release(dev_link_t *link);
+static int airo_config(struct pcmcia_device *link);
+static void airo_release(struct pcmcia_device *link);
/*
The attach() and detach() entry points are used to create and destroy
@@ -101,10 +101,10 @@ static void airo_detach(struct pcmcia_device *p_dev);
/*
A linked list of "instances" of the aironet device. Each actual
PCMCIA card corresponds to one device instance, and is described
- by one dev_link_t structure (defined in ds.h).
+ by one struct pcmcia_device structure (defined in ds.h).
You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of dev_link_t pointers, where minor
+ memory card driver uses an array of struct pcmcia_device pointers, where minor
device numbers are used to derive the corresponding array index.
*/
@@ -114,7 +114,7 @@ static void airo_detach(struct pcmcia_device *p_dev);
example, ethernet cards, modems). In other cases, there may be
many actual or logical devices (SCSI adapters, memory cards with
multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a dev_link_t
+ in a linked list starting at the 'dev' field of a struct pcmcia_device
structure. We allocate them in the card's private data structure,
because they generally shouldn't be allocated dynamically.
@@ -141,24 +141,16 @@ typedef struct local_info_t {
======================================================================*/
-static int airo_attach(struct pcmcia_device *p_dev)
+static int airo_probe(struct pcmcia_device *p_dev)
{
- dev_link_t *link;
local_info_t *local;
DEBUG(0, "airo_attach()\n");
- /* Initialize the dev_link_t structure */
- link = kzalloc(sizeof(struct dev_link_t), GFP_KERNEL);
- if (!link) {
- printk(KERN_ERR "airo_cs: no memory for new device\n");
- return -ENOMEM;
- }
-
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.IRQInfo1 = IRQ_LEVEL_ID;
- link->irq.Handler = NULL;
+ p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ p_dev->irq.Handler = NULL;
/*
General socket configuration defaults can go here. In this
@@ -167,26 +159,18 @@ static int airo_attach(struct pcmcia_device *p_dev)
and attributes of IO windows) are fixed by the nature of the
device, and can be hard-wired here.
*/
- link->conf.Attributes = 0;
- link->conf.Vcc = 50;
- link->conf.IntType = INT_MEMORY_AND_IO;
+ p_dev->conf.Attributes = 0;
+ p_dev->conf.IntType = INT_MEMORY_AND_IO;
/* Allocate space for private device-specific data */
local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
if (!local) {
printk(KERN_ERR "airo_cs: no memory for new device\n");
- kfree (link);
return -ENOMEM;
}
- link->priv = local;
+ p_dev->priv = local;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- airo_config(link);
-
- return 0;
+ return airo_config(p_dev);
} /* airo_attach */
/*======================================================================
@@ -198,14 +182,11 @@ static int airo_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void airo_detach(struct pcmcia_device *p_dev)
+static void airo_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
DEBUG(0, "airo_detach(0x%p)\n", link);
- if (link->state & DEV_CONFIG)
- airo_release(link);
+ airo_release(link);
if ( ((local_info_t*)link->priv)->eth_dev ) {
stop_airo_card( ((local_info_t*)link->priv)->eth_dev, 0 );
@@ -213,7 +194,6 @@ static void airo_detach(struct pcmcia_device *p_dev)
((local_info_t*)link->priv)->eth_dev = NULL;
kfree(link->priv);
- kfree(link);
} /* airo_detach */
/*======================================================================
@@ -227,9 +207,8 @@ static void airo_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void airo_config(dev_link_t *link)
+static int airo_config(struct pcmcia_device *link)
{
- client_handle_t handle;
tuple_t tuple;
cisparse_t parse;
local_info_t *dev;
@@ -237,8 +216,7 @@ static void airo_config(dev_link_t *link)
u_char buf[64];
win_req_t req;
memreq_t map;
-
- handle = link->handle;
+
dev = link->priv;
DEBUG(0, "airo_config(0x%p)\n", link);
@@ -252,15 +230,12 @@ static void airo_config(dev_link_t *link)
tuple.TupleData = buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
-
- /* Configure card */
- link->state |= DEV_CONFIG;
-
+
/*
In this loop, we scan the CIS for configuration table entries,
each of which describes a valid card configuration, including
@@ -274,12 +249,12 @@ static void airo_config(dev_link_t *link)
will only use the CIS to fill in implementation-defined details.
*/
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
cistpl_cftable_entry_t dflt = { 0 };
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
- if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
- pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+ if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
+ pcmcia_parse_tuple(link, &tuple, &parse) != 0)
goto next_entry;
if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
@@ -294,16 +269,11 @@ static void airo_config(dev_link_t *link)
/* Use power settings for Vcc and Vpp if present */
/* Note that the CIS values need to be rescaled */
- if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM))
- link->conf.Vcc = cfg->vcc.param[CISTPL_POWER_VNOM]/10000;
- else if (dflt.vcc.present & (1<<CISTPL_POWER_VNOM))
- link->conf.Vcc = dflt.vcc.param[CISTPL_POWER_VNOM]/10000;
-
if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
/* Do we need to allocate an interrupt? */
@@ -329,12 +299,12 @@ static void airo_config(dev_link_t *link)
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(link->handle, &link->io) != 0)
+ if (pcmcia_request_io(link, &link->io) != 0)
goto next_entry;
/*
Now set up a common memory window, if needed. There is room
- in the dev_link_t structure for one memory window handle,
+ in the struct pcmcia_device structure for one memory window handle,
but if the base addresses need to be saved, or if multiple
windows are needed, the info should go in the private data
structure for this device.
@@ -350,7 +320,7 @@ static void airo_config(dev_link_t *link)
req.Base = mem->win[0].host_addr;
req.Size = mem->win[0].len;
req.AccessSpeed = 0;
- if (pcmcia_request_window(&link->handle, &req, &link->win) != 0)
+ if (pcmcia_request_window(&link, &req, &link->win) != 0)
goto next_entry;
map.Page = 0; map.CardOffset = mem->win[0].card_addr;
if (pcmcia_map_mem_page(link->win, &map) != 0)
@@ -360,7 +330,7 @@ static void airo_config(dev_link_t *link)
break;
next_entry:
- CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
}
/*
@@ -369,33 +339,32 @@ static void airo_config(dev_link_t *link)
irq structure is initialized.
*/
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
/*
This actually configures the PCMCIA socket -- setting up
the I/O windows and the interrupt mapping, and putting the
card and host interface into "Memory and IO" mode.
*/
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
((local_info_t*)link->priv)->eth_dev =
init_airo_card( link->irq.AssignedIRQ,
- link->io.BasePort1, 1, &handle_to_dev(handle) );
+ link->io.BasePort1, 1, &handle_to_dev(link) );
if (!((local_info_t*)link->priv)->eth_dev) goto cs_failed;
/*
At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
+ initialized and arranged in a linked list at link->dev_node.
*/
strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name );
dev->node.major = dev->node.minor = 0;
- link->dev = &dev->node;
+ link->dev_node = &dev->node;
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d",
- dev->node.dev_name, link->conf.ConfigIndex,
- link->conf.Vcc/10, link->conf.Vcc%10);
- if (link->conf.Vpp1)
- printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
+ printk(KERN_INFO "%s: index 0x%02x: ",
+ dev->node.dev_name, link->conf.ConfigIndex);
+ if (link->conf.Vpp)
+ printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq.AssignedIRQ);
if (link->io.NumPorts1)
@@ -408,14 +377,12 @@ static void airo_config(dev_link_t *link)
printk(", mem 0x%06lx-0x%06lx", req.Base,
req.Base+req.Size-1);
printk("\n");
-
- link->state &= ~DEV_CONFIG_PENDING;
- return;
-
+ return 0;
+
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
airo_release(link);
-
+ return -ENODEV;
} /* airo_config */
/*======================================================================
@@ -426,51 +393,26 @@ static void airo_config(dev_link_t *link)
======================================================================*/
-static void airo_release(dev_link_t *link)
+static void airo_release(struct pcmcia_device *link)
{
DEBUG(0, "airo_release(0x%p)\n", link);
-
- /* Unlink the device chain */
- link->dev = NULL;
-
- /*
- In a normal driver, additional code may be needed to release
- other kernel data structures associated with this device.
- */
-
- /* Don't bother checking to see if these succeed or not */
- if (link->win)
- pcmcia_release_window(link->win);
- pcmcia_release_configuration(link->handle);
- if (link->io.NumPorts1)
- pcmcia_release_io(link->handle, &link->io);
- if (link->irq.AssignedIRQ)
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
}
-static int airo_suspend(struct pcmcia_device *p_dev)
+static int airo_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
local_info_t *local = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- netif_device_detach(local->eth_dev);
- pcmcia_release_configuration(link->handle);
- }
+ netif_device_detach(local->eth_dev);
return 0;
}
-static int airo_resume(struct pcmcia_device *p_dev)
+static int airo_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
local_info_t *local = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
+ if (link->open) {
reset_airo_card(local->eth_dev);
netif_device_attach(local->eth_dev);
}
@@ -492,7 +434,7 @@ static struct pcmcia_driver airo_driver = {
.drv = {
.name = "airo_cs",
},
- .probe = airo_attach,
+ .probe = airo_probe,
.remove = airo_detach,
.id_table = airo_ids,
.suspend = airo_suspend,
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index d6f4a5a3e55a6..26bf1127524df 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -91,8 +91,8 @@ MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards");
event handler.
*/
-static void atmel_config(dev_link_t *link);
-static void atmel_release(dev_link_t *link);
+static int atmel_config(struct pcmcia_device *link);
+static void atmel_release(struct pcmcia_device *link);
/*
The attach() and detach() entry points are used to create and destroy
@@ -112,10 +112,10 @@ static void atmel_detach(struct pcmcia_device *p_dev);
/*
A linked list of "instances" of the atmelnet device. Each actual
PCMCIA card corresponds to one device instance, and is described
- by one dev_link_t structure (defined in ds.h).
+ by one struct pcmcia_device structure (defined in ds.h).
You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of dev_link_t pointers, where minor
+ memory card driver uses an array of struct pcmcia_device pointers, where minor
device numbers are used to derive the corresponding array index.
*/
@@ -125,7 +125,7 @@ static void atmel_detach(struct pcmcia_device *p_dev);
example, ethernet cards, modems). In other cases, there may be
many actual or logical devices (SCSI adapters, memory cards with
multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a dev_link_t
+ in a linked list starting at the 'dev' field of a struct pcmcia_device
structure. We allocate them in the card's private data structure,
because they generally shouldn't be allocated dynamically.
@@ -152,24 +152,16 @@ typedef struct local_info_t {
======================================================================*/
-static int atmel_attach(struct pcmcia_device *p_dev)
+static int atmel_probe(struct pcmcia_device *p_dev)
{
- dev_link_t *link;
local_info_t *local;
DEBUG(0, "atmel_attach()\n");
- /* Initialize the dev_link_t structure */
- link = kzalloc(sizeof(struct dev_link_t), GFP_KERNEL);
- if (!link) {
- printk(KERN_ERR "atmel_cs: no memory for new device\n");
- return -ENOMEM;
- }
-
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.IRQInfo1 = IRQ_LEVEL_ID;
- link->irq.Handler = NULL;
+ p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ p_dev->irq.Handler = NULL;
/*
General socket configuration defaults can go here. In this
@@ -178,26 +170,18 @@ static int atmel_attach(struct pcmcia_device *p_dev)
and attributes of IO windows) are fixed by the nature of the
device, and can be hard-wired here.
*/
- link->conf.Attributes = 0;
- link->conf.Vcc = 50;
- link->conf.IntType = INT_MEMORY_AND_IO;
+ p_dev->conf.Attributes = 0;
+ p_dev->conf.IntType = INT_MEMORY_AND_IO;
/* Allocate space for private device-specific data */
local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
if (!local) {
printk(KERN_ERR "atmel_cs: no memory for new device\n");
- kfree (link);
return -ENOMEM;
}
- link->priv = local;
+ p_dev->priv = local;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- atmel_config(link);
-
- return 0;
+ return atmel_config(p_dev);
} /* atmel_attach */
/*======================================================================
@@ -209,17 +193,13 @@ static int atmel_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void atmel_detach(struct pcmcia_device *p_dev)
+static void atmel_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
DEBUG(0, "atmel_detach(0x%p)\n", link);
- if (link->state & DEV_CONFIG)
- atmel_release(link);
+ atmel_release(link);
kfree(link->priv);
- kfree(link);
}
/*======================================================================
@@ -236,19 +216,17 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
/* Call-back function to interrogate PCMCIA-specific information
about the current existance of the card */
static int card_present(void *arg)
-{
- dev_link_t *link = (dev_link_t *)arg;
- if (link->state & DEV_SUSPEND)
- return 0;
- else if (link->state & DEV_PRESENT)
+{
+ struct pcmcia_device *link = (struct pcmcia_device *)arg;
+
+ if (pcmcia_dev_present(link))
return 1;
-
+
return 0;
}
-static void atmel_config(dev_link_t *link)
+static int atmel_config(struct pcmcia_device *link)
{
- client_handle_t handle;
tuple_t tuple;
cisparse_t parse;
local_info_t *dev;
@@ -256,9 +234,8 @@ static void atmel_config(dev_link_t *link)
u_char buf[64];
struct pcmcia_device_id *did;
- handle = link->handle;
dev = link->priv;
- did = handle_to_dev(handle).driver_data;
+ did = handle_to_dev(link).driver_data;
DEBUG(0, "atmel_config(0x%p)\n", link);
@@ -272,15 +249,12 @@ static void atmel_config(dev_link_t *link)
registers.
*/
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
-
- /* Configure card */
- link->state |= DEV_CONFIG;
-
+
/*
In this loop, we scan the CIS for configuration table entries,
each of which describes a valid card configuration, including
@@ -294,12 +268,12 @@ static void atmel_config(dev_link_t *link)
will only use the CIS to fill in implementation-defined details.
*/
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
cistpl_cftable_entry_t dflt = { 0 };
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
- if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
- pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+ if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
+ pcmcia_parse_tuple(link, &tuple, &parse) != 0)
goto next_entry;
if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
@@ -314,16 +288,11 @@ static void atmel_config(dev_link_t *link)
/* Use power settings for Vcc and Vpp if present */
/* Note that the CIS values need to be rescaled */
- if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM))
- link->conf.Vcc = cfg->vcc.param[CISTPL_POWER_VNOM]/10000;
- else if (dflt.vcc.present & (1<<CISTPL_POWER_VNOM))
- link->conf.Vcc = dflt.vcc.param[CISTPL_POWER_VNOM]/10000;
-
if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
/* Do we need to allocate an interrupt? */
@@ -349,14 +318,14 @@ static void atmel_config(dev_link_t *link)
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(link->handle, &link->io) != 0)
+ if (pcmcia_request_io(link, &link->io) != 0)
goto next_entry;
/* If we got this far, we're cool! */
break;
next_entry:
- CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
}
/*
@@ -365,14 +334,14 @@ static void atmel_config(dev_link_t *link)
irq structure is initialized.
*/
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
/*
This actually configures the PCMCIA socket -- setting up
the I/O windows and the interrupt mapping, and putting the
card and host interface into "Memory and IO" mode.
*/
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
if (link->irq.AssignedIRQ == 0) {
printk(KERN_ALERT
@@ -384,7 +353,7 @@ static void atmel_config(dev_link_t *link)
init_atmel_card(link->irq.AssignedIRQ,
link->io.BasePort1,
did ? did->driver_info : ATMEL_FW_TYPE_NONE,
- &handle_to_dev(handle),
+ &handle_to_dev(link),
card_present,
link);
if (!((local_info_t*)link->priv)->eth_dev)
@@ -393,18 +362,18 @@ static void atmel_config(dev_link_t *link)
/*
At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
+ initialized and arranged in a linked list at link->dev_node.
*/
strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name );
dev->node.major = dev->node.minor = 0;
- link->dev = &dev->node;
-
- link->state &= ~DEV_CONFIG_PENDING;
- return;
-
+ link->dev_node = &dev->node;
+
+ return 0;
+
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
atmel_release(link);
+ return -ENODEV;
}
/*======================================================================
@@ -415,53 +384,34 @@ static void atmel_config(dev_link_t *link)
======================================================================*/
-static void atmel_release(dev_link_t *link)
+static void atmel_release(struct pcmcia_device *link)
{
struct net_device *dev = ((local_info_t*)link->priv)->eth_dev;
-
+
DEBUG(0, "atmel_release(0x%p)\n", link);
-
- /* Unlink the device chain */
- link->dev = NULL;
-
- if (dev)
+
+ if (dev)
stop_atmel_card(dev);
- ((local_info_t*)link->priv)->eth_dev = NULL;
-
- /* Don't bother checking to see if these succeed or not */
- pcmcia_release_configuration(link->handle);
- if (link->io.NumPorts1)
- pcmcia_release_io(link->handle, &link->io);
- if (link->irq.AssignedIRQ)
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
+ ((local_info_t*)link->priv)->eth_dev = NULL;
+
+ pcmcia_disable_device(link);
}
-static int atmel_suspend(struct pcmcia_device *dev)
+static int atmel_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
local_info_t *local = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- netif_device_detach(local->eth_dev);
- pcmcia_release_configuration(link->handle);
- }
+ netif_device_detach(local->eth_dev);
return 0;
}
-static int atmel_resume(struct pcmcia_device *dev)
+static int atmel_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
local_info_t *local = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- atmel_open(local->eth_dev);
- netif_device_attach(local->eth_dev);
- }
+ atmel_open(local->eth_dev);
+ netif_device_attach(local->eth_dev);
return 0;
}
@@ -515,7 +465,7 @@ static struct pcmcia_driver atmel_driver = {
.drv = {
.name = "atmel_cs",
},
- .probe = atmel_attach,
+ .probe = atmel_probe,
.remove = atmel_detach,
.id_table = atmel_ids,
.suspend = atmel_suspend,
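The atmel suspend/resume rewrite repeats what the xirc2ps and airo hunks did: the DEV_SUSPEND/DEV_CONFIG bookkeeping and the pcmcia_release_configuration()/pcmcia_request_configuration() pair are dropped because the PCMCIA core now brackets the driver callbacks with that socket work itself, so the driver only quiesces and revives its own device. Roughly, with hypothetical names (my_info with a netdev field, my_hw_reset):

static int my_suspend(struct pcmcia_device *link)
{
        struct my_info *info = link->priv;

        netif_device_detach(info->netdev);      /* core releases the socket config */
        return 0;
}

static int my_resume(struct pcmcia_device *link)
{
        struct my_info *info = link->priv;

        my_hw_reset(info->netdev);              /* driver-specific re-init only */
        netif_device_attach(info->netdev);
        return 0;
}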
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index d335b250923a1..55bed923fbe9f 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Ignore broken CIS VCC entry");
/* struct local_info::hw_priv */
struct hostap_cs_priv {
dev_node_t node;
- dev_link_t *link;
+ struct pcmcia_device *link;
int sandisk_connectplus;
};
@@ -204,15 +204,13 @@ static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
static void prism2_detach(struct pcmcia_device *p_dev);
static void prism2_release(u_long arg);
-static int prism2_config(dev_link_t *link);
+static int prism2_config(struct pcmcia_device *link);
static int prism2_pccard_card_present(local_info_t *local)
{
struct hostap_cs_priv *hw_priv = local->hw_priv;
- if (hw_priv != NULL && hw_priv->link != NULL &&
- ((hw_priv->link->state & (DEV_PRESENT | DEV_CONFIG)) ==
- (DEV_PRESENT | DEV_CONFIG)))
+ if (hw_priv != NULL && hw_priv->link != NULL && pcmcia_dev_present(hw_priv->link))
return 1;
return 0;
}
@@ -237,7 +235,7 @@ static void sandisk_set_iobase(local_info_t *local)
reg.Action = CS_WRITE;
reg.Offset = 0x10; /* 0x3f0 IO base 1 */
reg.Value = hw_priv->link->io.BasePort1 & 0x00ff;
- res = pcmcia_access_configuration_register(hw_priv->link->handle,
+ res = pcmcia_access_configuration_register(hw_priv->link,
&reg);
if (res != CS_SUCCESS) {
printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 0 -"
@@ -249,7 +247,7 @@ static void sandisk_set_iobase(local_info_t *local)
reg.Action = CS_WRITE;
reg.Offset = 0x12; /* 0x3f2 IO base 2 */
reg.Value = (hw_priv->link->io.BasePort1 & 0xff00) >> 8;
- res = pcmcia_access_configuration_register(hw_priv->link->handle,
+ res = pcmcia_access_configuration_register(hw_priv->link,
&reg);
if (res != CS_SUCCESS) {
printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 1 -"
@@ -301,9 +299,9 @@ static int sandisk_enable_wireless(struct net_device *dev)
tuple.TupleData = buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
- if (pcmcia_get_first_tuple(hw_priv->link->handle, &tuple) ||
- pcmcia_get_tuple_data(hw_priv->link->handle, &tuple) ||
- pcmcia_parse_tuple(hw_priv->link->handle, &tuple, parse) ||
+ if (pcmcia_get_first_tuple(hw_priv->link, &tuple) ||
+ pcmcia_get_tuple_data(hw_priv->link, &tuple) ||
+ pcmcia_parse_tuple(hw_priv->link, &tuple, parse) ||
parse->manfid.manf != 0xd601 || parse->manfid.card != 0x0101) {
/* No SanDisk manfid found */
ret = -ENODEV;
@@ -311,9 +309,9 @@ static int sandisk_enable_wireless(struct net_device *dev)
}
tuple.DesiredTuple = CISTPL_LONGLINK_MFC;
- if (pcmcia_get_first_tuple(hw_priv->link->handle, &tuple) ||
- pcmcia_get_tuple_data(hw_priv->link->handle, &tuple) ||
- pcmcia_parse_tuple(hw_priv->link->handle, &tuple, parse) ||
+ if (pcmcia_get_first_tuple(hw_priv->link, &tuple) ||
+ pcmcia_get_tuple_data(hw_priv->link, &tuple) ||
+ pcmcia_parse_tuple(hw_priv->link, &tuple, parse) ||
parse->longlink_mfc.nfn < 2) {
/* No multi-function links found */
ret = -ENODEV;
@@ -328,7 +326,7 @@ static int sandisk_enable_wireless(struct net_device *dev)
reg.Action = CS_WRITE;
reg.Offset = CISREG_COR;
reg.Value = COR_SOFT_RESET;
- res = pcmcia_access_configuration_register(hw_priv->link->handle,
+ res = pcmcia_access_configuration_register(hw_priv->link,
&reg);
if (res != CS_SUCCESS) {
printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
@@ -345,7 +343,7 @@ static int sandisk_enable_wireless(struct net_device *dev)
* will be enabled during the first cor_sreset call.
*/
reg.Value = COR_LEVEL_REQ | 0x8 | COR_ADDR_DECODE | COR_FUNC_ENA;
- res = pcmcia_access_configuration_register(hw_priv->link->handle,
+ res = pcmcia_access_configuration_register(hw_priv->link,
&reg);
if (res != CS_SUCCESS) {
printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
@@ -380,7 +378,7 @@ static void prism2_pccard_cor_sreset(local_info_t *local)
reg.Action = CS_READ;
reg.Offset = CISREG_COR;
reg.Value = 0;
- res = pcmcia_access_configuration_register(hw_priv->link->handle,
+ res = pcmcia_access_configuration_register(hw_priv->link,
&reg);
if (res != CS_SUCCESS) {
printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 1 (%d)\n",
@@ -392,7 +390,7 @@ static void prism2_pccard_cor_sreset(local_info_t *local)
reg.Action = CS_WRITE;
reg.Value |= COR_SOFT_RESET;
- res = pcmcia_access_configuration_register(hw_priv->link->handle,
+ res = pcmcia_access_configuration_register(hw_priv->link,
&reg);
if (res != CS_SUCCESS) {
printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 2 (%d)\n",
@@ -405,7 +403,7 @@ static void prism2_pccard_cor_sreset(local_info_t *local)
reg.Value &= ~COR_SOFT_RESET;
if (hw_priv->sandisk_connectplus)
reg.Value |= COR_IREQ_ENA;
- res = pcmcia_access_configuration_register(hw_priv->link->handle,
+ res = pcmcia_access_configuration_register(hw_priv->link,
&reg);
if (res != CS_SUCCESS) {
printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 3 (%d)\n",
@@ -439,7 +437,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
reg.Action = CS_READ;
reg.Offset = CISREG_COR;
reg.Value = 0;
- res = pcmcia_access_configuration_register(hw_priv->link->handle,
+ res = pcmcia_access_configuration_register(hw_priv->link,
&reg);
if (res != CS_SUCCESS) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 1 "
@@ -452,7 +450,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
reg.Action = CS_WRITE;
reg.Value |= COR_SOFT_RESET;
- res = pcmcia_access_configuration_register(hw_priv->link->handle,
+ res = pcmcia_access_configuration_register(hw_priv->link,
&reg);
if (res != CS_SUCCESS) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 2 "
@@ -466,7 +464,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
reg.Action = CS_WRITE;
reg.Value = hcr;
reg.Offset = CISREG_CCSR;
- res = pcmcia_access_configuration_register(hw_priv->link->handle,
+ res = pcmcia_access_configuration_register(hw_priv->link,
&reg);
if (res != CS_SUCCESS) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 3 "
@@ -478,7 +476,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
reg.Action = CS_WRITE;
reg.Offset = CISREG_COR;
reg.Value = old_cor & ~COR_SOFT_RESET;
- res = pcmcia_access_configuration_register(hw_priv->link->handle,
+ res = pcmcia_access_configuration_register(hw_priv->link,
&reg);
if (res != CS_SUCCESS) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 4 "
@@ -501,40 +499,27 @@ static struct prism2_helper_functions prism2_pccard_funcs =
/* allocate local data and register with CardServices
* initialize dev_link structure, but do not configure the card yet */
-static int prism2_attach(struct pcmcia_device *p_dev)
+static int hostap_cs_probe(struct pcmcia_device *p_dev)
{
- dev_link_t *link;
-
- link = kmalloc(sizeof(dev_link_t), GFP_KERNEL);
- if (link == NULL)
- return -ENOMEM;
-
- memset(link, 0, sizeof(dev_link_t));
+ int ret;
PDEBUG(DEBUG_HW, "%s: setting Vcc=33 (constant)\n", dev_info);
- link->conf.Vcc = 33;
- link->conf.IntType = INT_MEMORY_AND_IO;
-
- link->handle = p_dev;
- p_dev->instance = link;
+ p_dev->conf.IntType = INT_MEMORY_AND_IO;
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- if (prism2_config(link))
+ ret = prism2_config(p_dev);
+ if (ret) {
PDEBUG(DEBUG_EXTRA, "prism2_config() failed\n");
+ }
- return 0;
+ return ret;
}
-static void prism2_detach(struct pcmcia_device *p_dev)
+static void prism2_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
PDEBUG(DEBUG_FLOW, "prism2_detach\n");
- if (link->state & DEV_CONFIG) {
- prism2_release((u_long)link);
- }
+ prism2_release((u_long)link);
/* release net devices */
if (link->priv) {
@@ -547,7 +532,6 @@ static void prism2_detach(struct pcmcia_device *p_dev)
prism2_free_local_data(dev);
kfree(hw_priv);
}
- kfree(link);
}
@@ -558,7 +542,7 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
do { int ret = (retf); \
if (ret != 0) { \
PDEBUG(DEBUG_EXTRA, "CardServices(" #fn ") returned %d\n", ret); \
- cs_error(link->handle, fn, ret); \
+ cs_error(link, fn, ret); \
goto next_entry; \
} \
} while (0)
@@ -566,7 +550,7 @@ if (ret != 0) { \
/* run after a CARD_INSERTION event is received to configure the PCMCIA
* socket and make the device available to the system */
-static int prism2_config(dev_link_t *link)
+static int prism2_config(struct pcmcia_device *link)
{
struct net_device *dev;
struct hostap_interface *iface;
@@ -595,27 +579,24 @@ static int prism2_config(dev_link_t *link)
tuple.TupleData = buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link->handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link->handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(link->handle, &tuple, parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
link->conf.ConfigBase = parse->config.base;
link->conf.Present = parse->config.rmask[0];
CS_CHECK(GetConfigurationInfo,
- pcmcia_get_configuration_info(link->handle, &conf));
- PDEBUG(DEBUG_HW, "%s: %s Vcc=%d (from config)\n", dev_info,
- ignore_cis_vcc ? "ignoring" : "setting", conf.Vcc);
- link->conf.Vcc = conf.Vcc;
+ pcmcia_get_configuration_info(link, &conf));
/* Look for an appropriate configuration table entry in the CIS */
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link->handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
for (;;) {
cistpl_cftable_entry_t *cfg = &(parse->cftable_entry);
CFG_CHECK2(GetTupleData,
- pcmcia_get_tuple_data(link->handle, &tuple));
+ pcmcia_get_tuple_data(link, &tuple));
CFG_CHECK2(ParseTuple,
- pcmcia_parse_tuple(link->handle, &tuple, parse));
+ pcmcia_parse_tuple(link, &tuple, parse));
if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
dflt = *cfg;
@@ -650,10 +631,10 @@ static int prism2_config(dev_link_t *link)
}
if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
/* Do we need to allocate an interrupt? */
@@ -695,19 +676,19 @@ static int prism2_config(dev_link_t *link)
/* This reserves IO space but doesn't actually enable it */
CFG_CHECK2(RequestIO,
- pcmcia_request_io(link->handle, &link->io));
+ pcmcia_request_io(link, &link->io));
/* This configuration table entry is OK */
break;
next_entry:
CS_CHECK(GetNextTuple,
- pcmcia_get_next_tuple(link->handle, &tuple));
+ pcmcia_get_next_tuple(link, &tuple));
}
/* Need to allocate net_device before requesting IRQ handler */
dev = prism2_init_local_data(&prism2_pccard_funcs, 0,
- &handle_to_dev(link->handle));
+ &handle_to_dev(link));
if (dev == NULL)
goto failed;
link->priv = dev;
@@ -717,7 +698,7 @@ static int prism2_config(dev_link_t *link)
local->hw_priv = hw_priv;
hw_priv->link = link;
strcpy(hw_priv->node.dev_name, dev->name);
- link->dev = &hw_priv->node;
+ link->dev_node = &hw_priv->node;
/*
* Allocate an interrupt line. Note that this does not assign a
@@ -730,7 +711,7 @@ static int prism2_config(dev_link_t *link)
link->irq.Handler = prism2_interrupt;
link->irq.Instance = dev;
CS_CHECK(RequestIRQ,
- pcmcia_request_irq(link->handle, &link->irq));
+ pcmcia_request_irq(link, &link->irq));
}
/*
@@ -739,18 +720,17 @@ static int prism2_config(dev_link_t *link)
* card and host interface into "Memory and IO" mode.
*/
CS_CHECK(RequestConfiguration,
- pcmcia_request_configuration(link->handle, &link->conf));
+ pcmcia_request_configuration(link, &link->conf));
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d",
- dev_info, link->conf.ConfigIndex,
- link->conf.Vcc / 10, link->conf.Vcc % 10);
- if (link->conf.Vpp1)
- printk(", Vpp %d.%d", link->conf.Vpp1 / 10,
- link->conf.Vpp1 % 10);
+ printk(KERN_INFO "%s: index 0x%02x: ",
+ dev_info, link->conf.ConfigIndex);
+ if (link->conf.Vpp)
+ printk(", Vpp %d.%d", link->conf.Vpp / 10,
+ link->conf.Vpp % 10);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq.AssignedIRQ);
if (link->io.NumPorts1)
@@ -761,9 +741,6 @@ static int prism2_config(dev_link_t *link)
link->io.BasePort2+link->io.NumPorts2-1);
printk("\n");
- link->state |= DEV_CONFIG;
- link->state &= ~DEV_CONFIG_PENDING;
-
local->shutdown = 0;
sandisk_enable_wireless(dev);
@@ -778,7 +755,7 @@ static int prism2_config(dev_link_t *link)
return ret;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
kfree(parse);
@@ -790,7 +767,7 @@ static int prism2_config(dev_link_t *link)
static void prism2_release(u_long arg)
{
- dev_link_t *link = (dev_link_t *)arg;
+ struct pcmcia_device *link = (struct pcmcia_device *)arg;
PDEBUG(DEBUG_FLOW, "prism2_release\n");
@@ -799,71 +776,54 @@ static void prism2_release(u_long arg)
struct hostap_interface *iface;
iface = netdev_priv(dev);
- if (link->state & DEV_CONFIG)
- prism2_hw_shutdown(dev, 0);
+ prism2_hw_shutdown(dev, 0);
iface->local->shutdown = 1;
}
- if (link->win)
- pcmcia_release_window(link->win);
- pcmcia_release_configuration(link->handle);
- if (link->io.NumPorts1)
- pcmcia_release_io(link->handle, &link->io);
- if (link->irq.AssignedIRQ)
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
-
+ pcmcia_disable_device(link);
PDEBUG(DEBUG_FLOW, "release - done\n");
}
-static int hostap_cs_suspend(struct pcmcia_device *p_dev)
+static int hostap_cs_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = (struct net_device *) link->priv;
int dev_open = 0;
+ struct hostap_interface *iface = NULL;
- PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_SUSPEND\n", dev_info);
-
- link->state |= DEV_SUSPEND;
+ if (dev)
+ iface = netdev_priv(dev);
- if (link->state & DEV_CONFIG) {
- struct hostap_interface *iface = netdev_priv(dev);
- if (iface && iface->local)
- dev_open = iface->local->num_dev_open > 0;
- if (dev_open) {
- netif_stop_queue(dev);
- netif_device_detach(dev);
- }
- prism2_suspend(dev);
- pcmcia_release_configuration(link->handle);
+ PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_SUSPEND\n", dev_info);
+ if (iface && iface->local)
+ dev_open = iface->local->num_dev_open > 0;
+ if (dev_open) {
+ netif_stop_queue(dev);
+ netif_device_detach(dev);
}
+ prism2_suspend(dev);
return 0;
}
-static int hostap_cs_resume(struct pcmcia_device *p_dev)
+static int hostap_cs_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = (struct net_device *) link->priv;
int dev_open = 0;
+ struct hostap_interface *iface = NULL;
- PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_RESUME\n", dev_info);
+ if (dev)
+ iface = netdev_priv(dev);
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- struct hostap_interface *iface = netdev_priv(dev);
- if (iface && iface->local)
- dev_open = iface->local->num_dev_open > 0;
+ PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_RESUME\n", dev_info);
- pcmcia_request_configuration(link->handle, &link->conf);
+ if (iface && iface->local)
+ dev_open = iface->local->num_dev_open > 0;
- prism2_hw_shutdown(dev, 1);
- prism2_hw_config(dev, dev_open ? 0 : 1);
- if (dev_open) {
- netif_device_attach(dev);
- netif_start_queue(dev);
- }
+ prism2_hw_shutdown(dev, 1);
+ prism2_hw_config(dev, dev_open ? 0 : 1);
+ if (dev_open) {
+ netif_device_attach(dev);
+ netif_start_queue(dev);
}
return 0;
@@ -930,7 +890,7 @@ static struct pcmcia_driver hostap_driver = {
.drv = {
.name = "hostap_cs",
},
- .probe = prism2_attach,
+ .probe = hostap_cs_probe,
.remove = prism2_detach,
.owner = THIS_MODULE,
.id_table = hostap_cs_ids,
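hostap's prism2_pccard_card_present() above, like DEV_OK() in xirc2ps's do_open(), now defers to pcmcia_dev_present(), the core's single answer to "is the card still plugged in and configured". A driver-side check then reduces to something like this sketch (my_open and my_info are hypothetical):

static int my_open(struct net_device *dev)
{
        struct my_info *info = netdev_priv(dev);

        if (!pcmcia_dev_present(info->p_dev))
                return -ENODEV;         /* card ejected or never configured */

        netif_start_queue(dev);
        return 0;
}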
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 9dce522526c5c..bca89cff85a63 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -5573,8 +5573,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
case IEEE80211_52GHZ_BAND:
network->mode = IEEE_A;
i = ieee80211_channel_to_index(priv->ieee, priv->channel);
- if (i == -1)
- BUG();
+ BUG_ON(i == -1);
if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
IPW_WARNING("Overriding invalid channel\n");
priv->channel = geo->a[0].channel;
@@ -5587,8 +5586,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
else
network->mode = IEEE_B;
i = ieee80211_channel_to_index(priv->ieee, priv->channel);
- if (i == -1)
- BUG();
+ BUG_ON(i == -1);
if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
IPW_WARNING("Overriding invalid channel\n");
priv->channel = geo->bg[0].channel;
@@ -6715,8 +6713,7 @@ static int ipw_qos_association(struct ipw_priv *priv,
switch (priv->ieee->iw_mode) {
case IW_MODE_ADHOC:
- if (!(network->capability & WLAN_CAPABILITY_IBSS))
- BUG();
+ BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
qos_data = &ibss_data;
break;
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index 75ce6ddb0cf5f..9343d970537be 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -190,8 +190,8 @@ module_param(mem_speed, int, 0);
/*====================================================================*/
/* PCMCIA (Card Services) related functions */
-static void netwave_release(dev_link_t *link); /* Card removal */
-static void netwave_pcmcia_config(dev_link_t *arg); /* Runs after card
+static void netwave_release(struct pcmcia_device *link); /* Card removal */
+static int netwave_pcmcia_config(struct pcmcia_device *arg); /* Runs after card
insertion */
static void netwave_detach(struct pcmcia_device *p_dev); /* Destroy instance */
@@ -221,10 +221,10 @@ static struct iw_statistics* netwave_get_wireless_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
/*
- A dev_link_t structure has fields for most things that are needed
+ A struct pcmcia_device structure has fields for most things that are needed
to keep track of a socket, but there will usually be some device
specific information that also needs to be kept track of. The
- 'priv' pointer in a dev_link_t structure can be used to point to
+ 'priv' pointer in a struct pcmcia_device structure can be used to point to
a device-specific private data structure, like this.
A driver needs to provide a dev_node_t structure for each device
@@ -232,7 +232,7 @@ static void set_multicast_list(struct net_device *dev);
example, ethernet cards, modems). In other cases, there may be
many actual or logical devices (SCSI adapters, memory cards with
multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a dev_link_t
+ in a linked list starting at the 'dev' field of a struct pcmcia_device
structure. We allocate them in the card's private data structure,
because they generally can't be allocated dynamically.
*/
@@ -268,7 +268,7 @@ struct site_survey {
};
typedef struct netwave_private {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
dev_node_t node;
u_char __iomem *ramBase;
@@ -376,20 +376,19 @@ static struct iw_statistics *netwave_get_wireless_stats(struct net_device *dev)
* configure the card at this point -- we wait until we receive a
* card insertion event.
*/
-static int netwave_attach(struct pcmcia_device *p_dev)
+static int netwave_probe(struct pcmcia_device *link)
{
- dev_link_t *link;
struct net_device *dev;
netwave_private *priv;
DEBUG(0, "netwave_attach()\n");
- /* Initialize the dev_link_t structure */
+ /* Initialize the struct pcmcia_device structure */
dev = alloc_etherdev(sizeof(netwave_private));
if (!dev)
return -ENOMEM;
priv = netdev_priv(dev);
- link = &priv->link;
+ priv->p_dev = link;
link->priv = dev;
/* The io structure describes IO port mapping */
@@ -406,7 +405,6 @@ static int netwave_attach(struct pcmcia_device *p_dev)
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
link->conf.Present = PRESENT_OPTION;
@@ -430,13 +428,7 @@ static int netwave_attach(struct pcmcia_device *p_dev)
dev->stop = &netwave_close;
link->irq.Instance = dev;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- netwave_pcmcia_config( link);
-
- return 0;
+ return netwave_pcmcia_config(link);
} /* netwave_attach */
/*
@@ -447,17 +439,15 @@ static int netwave_attach(struct pcmcia_device *p_dev)
* structures are freed. Otherwise, the structures will be freed
* when the device is released.
*/
-static void netwave_detach(struct pcmcia_device *p_dev)
+static void netwave_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
DEBUG(0, "netwave_detach(0x%p)\n", link);
- if (link->state & DEV_CONFIG)
- netwave_release(link);
+ netwave_release(link);
- if (link->dev)
+ if (link->dev_node)
unregister_netdev(dev);
free_netdev(dev);
@@ -743,8 +733,7 @@ static const struct iw_handler_def netwave_handler_def =
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void netwave_pcmcia_config(dev_link_t *link) {
- client_handle_t handle = link->handle;
+static int netwave_pcmcia_config(struct pcmcia_device *link) {
struct net_device *dev = link->priv;
netwave_private *priv = netdev_priv(dev);
tuple_t tuple;
@@ -766,15 +755,12 @@ static void netwave_pcmcia_config(dev_link_t *link) {
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
-
/*
* Try allocating IO ports. This tries a few fixed addresses.
* If you want, you can also read the card's config table to
@@ -782,11 +768,11 @@ static void netwave_pcmcia_config(dev_link_t *link) {
*/
for (i = j = 0x0; j < 0x400; j += 0x20) {
link->io.BasePort1 = j ^ 0x300;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) break;
}
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
goto failed;
}
@@ -794,16 +780,16 @@ static void netwave_pcmcia_config(dev_link_t *link) {
* Now allocate an interrupt line. Note that this does not
* actually assign a handler to the interrupt.
*/
- CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
/*
* This actually configures the PCMCIA socket -- setting up
* the I/O windows and the interrupt mapping.
*/
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
/*
- * Allocate a 32K memory window. Note that the dev_link_t
+ * Allocate a 32K memory window. Note that the struct pcmcia_device
* structure provides space for one window handle -- if your
* device needs several windows, you'll need to keep track of
* the handles in your private data structure, dev->priv.
@@ -813,7 +799,7 @@ static void netwave_pcmcia_config(dev_link_t *link) {
req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
req.Base = 0; req.Size = 0x8000;
req.AccessSpeed = mem_speed;
- CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &link->win));
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win));
mem.CardOffset = 0x20000; mem.Page = 0;
CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem));
@@ -823,7 +809,7 @@ static void netwave_pcmcia_config(dev_link_t *link) {
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
if (register_netdev(dev) != 0) {
printk(KERN_DEBUG "netwave_cs: register_netdev() failed\n");
@@ -831,8 +817,7 @@ static void netwave_pcmcia_config(dev_link_t *link) {
}
strcpy(priv->node.dev_name, dev->name);
- link->dev = &priv->node;
- link->state &= ~DEV_CONFIG_PENDING;
+ link->dev_node = &priv->node;
/* Reset card before reading physical address */
netwave_doreset(dev->base_addr, ramBase);
@@ -852,12 +837,13 @@ static void netwave_pcmcia_config(dev_link_t *link) {
printk(KERN_DEBUG "Netwave_reset: revision %04x %04x\n",
get_uint16(ramBase + NETWAVE_EREG_ARW),
get_uint16(ramBase + NETWAVE_EREG_ARW+2));
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
netwave_release(link);
+ return -ENODEV;
} /* netwave_pcmcia_config */
/*
@@ -867,52 +853,35 @@ failed:
* device, and release the PCMCIA configuration. If the device is
* still open, this will be postponed until it is closed.
*/
-static void netwave_release(dev_link_t *link)
+static void netwave_release(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
- netwave_private *priv = netdev_priv(dev);
-
- DEBUG(0, "netwave_release(0x%p)\n", link);
+ struct net_device *dev = link->priv;
+ netwave_private *priv = netdev_priv(dev);
- /* Don't bother checking to see if these succeed or not */
- if (link->win) {
- iounmap(priv->ramBase);
- pcmcia_release_window(link->win);
- }
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
+ DEBUG(0, "netwave_release(0x%p)\n", link);
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
+ if (link->win)
+ iounmap(priv->ramBase);
}
-static int netwave_suspend(struct pcmcia_device *p_dev)
+static int netwave_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if (link->open)
- netif_device_detach(dev);
- pcmcia_release_configuration(link->handle);
- }
+ if (link->open)
+ netif_device_detach(dev);
return 0;
}
-static int netwave_resume(struct pcmcia_device *p_dev)
+static int netwave_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- if (link->open) {
- netwave_reset(dev);
- netif_device_attach(dev);
- }
+ if (link->open) {
+ netwave_reset(dev);
+ netif_device_attach(dev);
}
return 0;
@@ -1119,7 +1088,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id, struct pt_regs *regs
u_char __iomem *ramBase;
struct net_device *dev = (struct net_device *)dev_id;
struct netwave_private *priv = netdev_priv(dev);
- dev_link_t *link = &priv->link;
+ struct pcmcia_device *link = priv->p_dev;
int i;
if (!netif_device_present(dev))
@@ -1138,7 +1107,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id, struct pt_regs *regs
status = inb(iobase + NETWAVE_REG_ASR);
- if (!DEV_OK(link)) {
+ if (!pcmcia_dev_present(link)) {
DEBUG(1, "netwave_interrupt: Interrupt with status 0x%x "
"from removed or suspended card!\n", status);
break;
@@ -1373,11 +1342,11 @@ static int netwave_rx(struct net_device *dev)
static int netwave_open(struct net_device *dev) {
netwave_private *priv = netdev_priv(dev);
- dev_link_t *link = &priv->link;
+ struct pcmcia_device *link = priv->p_dev;
DEBUG(1, "netwave_open: starting.\n");
- if (!DEV_OK(link))
+ if (!pcmcia_dev_present(link))
return -ENODEV;
link->open++;
@@ -1390,7 +1359,7 @@ static int netwave_open(struct net_device *dev) {
static int netwave_close(struct net_device *dev) {
netwave_private *priv = netdev_priv(dev);
- dev_link_t *link = &priv->link;
+ struct pcmcia_device *link = priv->p_dev;
DEBUG(1, "netwave_close: finishing.\n");
@@ -1411,7 +1380,7 @@ static struct pcmcia_driver netwave_driver = {
.drv = {
.name = "netwave_cs",
},
- .probe = netwave_attach,
+ .probe = netwave_probe,
.remove = netwave_detach,
.id_table = netwave_ids,
.suspend = netwave_suspend,
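/*
 * netwave now shows the probe/detach shape shared by every driver in
 * this series: the private structure stores a struct pcmcia_device
 * pointer instead of embedding a dev_link_t, probe returns whatever the
 * config routine returns, and detach uses link->dev_node to decide
 * whether the net_device was ever registered.  A condensed sketch under
 * those assumptions; example_config() and example_release() stand in
 * for the driver's own config and release routines.
 */
struct example_private {
    struct pcmcia_device *p_dev;
    dev_node_t node;
    /* ... driver state ... */
};

static int example_config(struct pcmcia_device *link);
static void example_release(struct pcmcia_device *link);

static int example_probe(struct pcmcia_device *link)
{
    struct net_device *dev;
    struct example_private *priv;

    dev = alloc_etherdev(sizeof(struct example_private));
    if (!dev)
        return -ENOMEM;
    priv = netdev_priv(dev);
    priv->p_dev = link;             /* replaces the embedded dev_link_t */
    link->priv = dev;

    /* ... fill in link->io, link->irq and link->conf as above ... */
    return example_config(link);    /* probe now reports config failures */
}

static void example_detach(struct pcmcia_device *link)
{
    struct net_device *dev = link->priv;

    example_release(link);
    if (link->dev_node)             /* set only after register_netdev() */
        unregister_netdev(dev);
    free_netdev(dev);
}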
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index ec6f2a48895b5..434f7d7ad8416 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -49,7 +49,7 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket
/* PCMCIA specific device information (goes in the card field of
* struct orinoco_private */
struct orinoco_pccard {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
/* Used to handle hard reset */
@@ -63,8 +63,8 @@ struct orinoco_pccard {
/* Function prototypes */
/********************************************************************/
-static void orinoco_cs_config(dev_link_t *link);
-static void orinoco_cs_release(dev_link_t *link);
+static int orinoco_cs_config(struct pcmcia_device *link);
+static void orinoco_cs_release(struct pcmcia_device *link);
static void orinoco_cs_detach(struct pcmcia_device *p_dev);
/********************************************************************/
@@ -75,13 +75,13 @@ static int
orinoco_cs_hard_reset(struct orinoco_private *priv)
{
struct orinoco_pccard *card = priv->card;
- dev_link_t *link = &card->link;
+ struct pcmcia_device *link = card->p_dev;
int err;
/* We need atomic ops here, because we're not holding the lock */
set_bit(0, &card->hard_reset_in_progress);
- err = pcmcia_reset_card(link->handle, NULL);
+ err = pcmcia_reset_card(link, NULL);
if (err)
return err;
@@ -104,12 +104,11 @@ orinoco_cs_hard_reset(struct orinoco_private *priv)
* configure the card at this point -- we wait until we receive a card
* insertion event. */
static int
-orinoco_cs_attach(struct pcmcia_device *p_dev)
+orinoco_cs_probe(struct pcmcia_device *link)
{
struct net_device *dev;
struct orinoco_private *priv;
struct orinoco_pccard *card;
- dev_link_t *link;
dev = alloc_orinocodev(sizeof(*card), orinoco_cs_hard_reset);
if (! dev)
@@ -118,7 +117,7 @@ orinoco_cs_attach(struct pcmcia_device *p_dev)
card = priv->card;
/* Link both structures together */
- link = &card->link;
+ card->p_dev = link;
link->priv = dev;
/* Interrupt setup */
@@ -135,16 +134,7 @@ orinoco_cs_attach(struct pcmcia_device *p_dev)
link->conf.Attributes = 0;
link->conf.IntType = INT_MEMORY_AND_IO;
- /* Register with Card Services */
- link->next = NULL;
-
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- orinoco_cs_config(link);
-
- return 0;
+ return orinoco_cs_config(link);
} /* orinoco_cs_attach */
/*
@@ -153,16 +143,14 @@ orinoco_cs_attach(struct pcmcia_device *p_dev)
* are freed. Otherwise, the structures will be freed when the device
* is released.
*/
-static void orinoco_cs_detach(struct pcmcia_device *p_dev)
+static void orinoco_cs_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- if (link->state & DEV_CONFIG)
- orinoco_cs_release(link);
+ orinoco_cs_release(link);
- DEBUG(0, PFX "detach: link=%p link->dev=%p\n", link, link->dev);
- if (link->dev) {
+ DEBUG(0, PFX "detach: link=%p link->dev_node=%p\n", link, link->dev_node);
+ if (link->dev_node) {
DEBUG(0, PFX "About to unregister net device %p\n",
dev);
unregister_netdev(dev);
@@ -180,11 +168,10 @@ static void orinoco_cs_detach(struct pcmcia_device *p_dev)
last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; \
} while (0)
-static void
-orinoco_cs_config(dev_link_t *link)
+static int
+orinoco_cs_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- client_handle_t handle = link->handle;
struct orinoco_private *priv = netdev_priv(dev);
struct orinoco_pccard *card = priv->card;
hermes_t *hw = &priv->hw;
@@ -196,7 +183,7 @@ orinoco_cs_config(dev_link_t *link)
cisparse_t parse;
void __iomem *mem;
- CS_CHECK(ValidateCIS, pcmcia_validate_cis(handle, &info));
+ CS_CHECK(ValidateCIS, pcmcia_validate_cis(link, &info));
/*
* This reads the card's CONFIG tuple to find its
@@ -207,19 +194,15 @@ orinoco_cs_config(dev_link_t *link)
tuple.TupleData = buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
-
/* Look up the current Vcc */
CS_CHECK(GetConfigurationInfo,
- pcmcia_get_configuration_info(handle, &conf));
- link->conf.Vcc = conf.Vcc;
+ pcmcia_get_configuration_info(link, &conf));
/*
* In this loop, we scan the CIS for configuration table
@@ -236,13 +219,13 @@ orinoco_cs_config(dev_link_t *link)
* implementation-defined details.
*/
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
cistpl_cftable_entry_t dflt = { .index = 0 };
- if ( (pcmcia_get_tuple_data(handle, &tuple) != 0)
- || (pcmcia_parse_tuple(handle, &tuple, &parse) != 0))
+ if ( (pcmcia_get_tuple_data(link, &tuple) != 0)
+ || (pcmcia_parse_tuple(link, &tuple, &parse) != 0))
goto next_entry;
if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
@@ -274,10 +257,10 @@ orinoco_cs_config(dev_link_t *link)
}
if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
/* Do we need to allocate an interrupt? */
@@ -307,7 +290,7 @@ orinoco_cs_config(dev_link_t *link)
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(link->handle, &link->io) != 0)
+ if (pcmcia_request_io(link, &link->io) != 0)
goto next_entry;
}
@@ -317,9 +300,8 @@ orinoco_cs_config(dev_link_t *link)
break;
next_entry:
- if (link->io.NumPorts1)
- pcmcia_release_io(link->handle, &link->io);
- last_ret = pcmcia_get_next_tuple(handle, &tuple);
+ pcmcia_disable_device(link);
+ last_ret = pcmcia_get_next_tuple(link, &tuple);
if (last_ret == CS_NO_MORE_ITEMS) {
printk(KERN_ERR PFX "GetNextTuple(): No matching "
"CIS configuration. Maybe you need the "
@@ -333,7 +315,7 @@ orinoco_cs_config(dev_link_t *link)
* a handler to the interrupt, unless the 'Handler' member of
* the irq structure is initialized.
*/
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
/* We initialize the hermes structure before completing PCMCIA
* configuration just in case the interrupt handler gets
@@ -350,7 +332,7 @@ orinoco_cs_config(dev_link_t *link)
* card and host interface into "Memory and IO" mode.
*/
CS_CHECK(RequestConfiguration,
- pcmcia_request_configuration(link->handle, &link->conf));
+ pcmcia_request_configuration(link, &link->conf));
/* Ok, we have the configuration, prepare to register the netdev */
dev->base_addr = link->io.BasePort1;
@@ -358,7 +340,7 @@ orinoco_cs_config(dev_link_t *link)
SET_MODULE_OWNER(dev);
card->node.major = card->node.minor = 0;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
/* Tell the stack we exist */
if (register_netdev(dev) != 0) {
printk(KERN_ERR PFX "register_netdev() failed\n");
@@ -366,20 +348,18 @@ orinoco_cs_config(dev_link_t *link)
}
/* At this point, the dev_node_t structure(s) needs to be
- * initialized and arranged in a linked list at link->dev. */
+ * initialized and arranged in a linked list at link->dev_node. */
strcpy(card->node.dev_name, dev->name);
- link->dev = &card->node; /* link->dev being non-NULL is also
+ link->dev_node = &card->node; /* link->dev_node being non-NULL is also
used to indicate that the
net_device has been registered */
- link->state &= ~DEV_CONFIG_PENDING;
/* Finally, report what we've done */
- printk(KERN_DEBUG "%s: index 0x%02x: Vcc %d.%d",
- dev->name, link->conf.ConfigIndex,
- link->conf.Vcc / 10, link->conf.Vcc % 10);
- if (link->conf.Vpp1)
- printk(", Vpp %d.%d", link->conf.Vpp1 / 10,
- link->conf.Vpp1 % 10);
+ printk(KERN_DEBUG "%s: index 0x%02x: ",
+ dev->name, link->conf.ConfigIndex);
+ if (link->conf.Vpp)
+ printk(", Vpp %d.%d", link->conf.Vpp / 10,
+ link->conf.Vpp % 10);
printk(", irq %d", link->irq.AssignedIRQ);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
@@ -389,13 +369,14 @@ orinoco_cs_config(dev_link_t *link)
link->io.BasePort2 + link->io.NumPorts2 - 1);
printk("\n");
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
orinoco_cs_release(link);
+ return -ENODEV;
} /* orinoco_cs_config */
/*
@@ -404,7 +385,7 @@ orinoco_cs_config(dev_link_t *link)
* still open, this will be postponed until it is closed.
*/
static void
-orinoco_cs_release(dev_link_t *link)
+orinoco_cs_release(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
struct orinoco_private *priv = netdev_priv(dev);
@@ -416,88 +397,68 @@ orinoco_cs_release(dev_link_t *link)
priv->hw_unavailable++;
spin_unlock_irqrestore(&priv->lock, flags);
- /* Don't bother checking to see if these succeed or not */
- pcmcia_release_configuration(link->handle);
- if (link->io.NumPorts1)
- pcmcia_release_io(link->handle, &link->io);
- if (link->irq.AssignedIRQ)
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
if (priv->hw.iobase)
ioport_unmap(priv->hw.iobase);
} /* orinoco_cs_release */
-static int orinoco_cs_suspend(struct pcmcia_device *p_dev)
+static int orinoco_cs_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
struct orinoco_private *priv = netdev_priv(dev);
struct orinoco_pccard *card = priv->card;
int err = 0;
unsigned long flags;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- /* This is probably racy, but I can't think of
- a better way, short of rewriting the PCMCIA
- layer to not suck :-( */
- if (! test_bit(0, &card->hard_reset_in_progress)) {
- spin_lock_irqsave(&priv->lock, flags);
+ /* This is probably racy, but I can't think of
+ a better way, short of rewriting the PCMCIA
+ layer to not suck :-( */
+ if (! test_bit(0, &card->hard_reset_in_progress)) {
+ spin_lock_irqsave(&priv->lock, flags);
- err = __orinoco_down(dev);
- if (err)
- printk(KERN_WARNING "%s: Error %d downing interface\n",
- dev->name, err);
+ err = __orinoco_down(dev);
+ if (err)
+ printk(KERN_WARNING "%s: Error %d downing interface\n",
+ dev->name, err);
- netif_device_detach(dev);
- priv->hw_unavailable++;
+ netif_device_detach(dev);
+ priv->hw_unavailable++;
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- pcmcia_release_configuration(link->handle);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
return 0;
}
-static int orinoco_cs_resume(struct pcmcia_device *p_dev)
+static int orinoco_cs_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
struct orinoco_private *priv = netdev_priv(dev);
struct orinoco_pccard *card = priv->card;
int err = 0;
unsigned long flags;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- /* FIXME: should we double check that this is
- * the same card as we had before */
- pcmcia_request_configuration(link->handle, &link->conf);
-
- if (! test_bit(0, &card->hard_reset_in_progress)) {
- err = orinoco_reinit_firmware(dev);
- if (err) {
- printk(KERN_ERR "%s: Error %d re-initializing firmware\n",
- dev->name, err);
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
+ if (! test_bit(0, &card->hard_reset_in_progress)) {
+ err = orinoco_reinit_firmware(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d re-initializing firmware\n",
+ dev->name, err);
+ return -EIO;
+ }
- netif_device_attach(dev);
- priv->hw_unavailable--;
+ spin_lock_irqsave(&priv->lock, flags);
- if (priv->open && ! priv->hw_unavailable) {
- err = __orinoco_up(dev);
- if (err)
- printk(KERN_ERR "%s: Error %d restarting card\n",
- dev->name, err);
- }
+ netif_device_attach(dev);
+ priv->hw_unavailable--;
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (priv->open && ! priv->hw_unavailable) {
+ err = __orinoco_up(dev);
+ if (err)
+ printk(KERN_ERR "%s: Error %d restarting card\n",
+ dev->name, err);
}
+
+ spin_unlock_irqrestore(&priv->lock, flags);
}
return 0;
@@ -604,7 +565,7 @@ static struct pcmcia_driver orinoco_driver = {
.drv = {
.name = DRIVER_NAME,
},
- .probe = orinoco_cs_attach,
+ .probe = orinoco_cs_probe,
.remove = orinoco_cs_detach,
.id_table = orinoco_cs_ids,
.suspend = orinoco_cs_suspend,
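/*
 * Two more patterns recur in the orinoco hunks above.  First, the config
 * routine now returns an int so that probe can propagate failures, while
 * the CS_CHECK macro (defined the same way in each of these drivers)
 * still funnels Card Services errors to a cs_failed label.  Second, the
 * release path collapses the old release_configuration, release_io and
 * release_irq calls into a single pcmcia_disable_device().  A sketch of
 * both, not orinoco's full config logic:
 */
#define CS_CHECK(fn, ret) \
    do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

static void example_cs_release(struct pcmcia_device *link);

static int example_cs_config(struct pcmcia_device *link)
{
    int last_fn, last_ret;

    CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
    CS_CHECK(RequestConfiguration,
             pcmcia_request_configuration(link, &link->conf));
    /* ... register the net_device and set link->dev_node ... */
    return 0;

cs_failed:
    cs_error(link, last_fn, last_ret);
    example_cs_release(link);
    return -ENODEV;                 /* probe sees the failure now */
}

static void example_cs_release(struct pcmcia_device *link)
{
    /* One call releases the configuration, I/O, IRQ and windows. */
    pcmcia_disable_device(link);
}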
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 7880d8c31aadc..879eb427607ca 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -90,8 +90,8 @@ module_param(pc_debug, int, 0);
#define DEBUG(n, args...)
#endif
/** Prototypes based on PCMCIA skeleton driver *******************************/
-static void ray_config(dev_link_t *link);
-static void ray_release(dev_link_t *link);
+static int ray_config(struct pcmcia_device *link);
+static void ray_release(struct pcmcia_device *link);
static void ray_detach(struct pcmcia_device *p_dev);
/***** Prototypes indicated by device structure ******************************/
@@ -190,20 +190,17 @@ static int bc;
static char *phy_addr = NULL;
-/* A linked list of "instances" of the ray device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one dev_link_t structure (defined in ds.h).
-*/
-static dev_link_t *dev_list = NULL;
-
-/* A dev_link_t structure has fields for most things that are needed
+/* A struct pcmcia_device structure has fields for most things that are needed
to keep track of a socket, but there will usually be some device
specific information that also needs to be kept track of. The
- 'priv' pointer in a dev_link_t structure can be used to point to
+ 'priv' pointer in a struct pcmcia_device structure can be used to point to
a device-specific private data structure, like this.
*/
static unsigned int ray_mem_speed = 500;
+/* WARNING: THIS DRIVER IS NOT CAPABLE OF HANDLING MULTIPLE DEVICES! */
+static struct pcmcia_device *this_device = NULL;
+
MODULE_AUTHOR("Corey Thomas <corey@world.std.com>");
MODULE_DESCRIPTION("Raylink/WebGear wireless LAN driver");
MODULE_LICENSE("GPL");
@@ -306,56 +303,46 @@ static char rcsid[] = "Raylink/WebGear wireless LAN - Corey <Thomas corey@world.
configure the card at this point -- we wait until we receive a
card insertion event.
=============================================================================*/
-static int ray_attach(struct pcmcia_device *p_dev)
+static int ray_probe(struct pcmcia_device *p_dev)
{
- dev_link_t *link;
ray_dev_t *local;
struct net_device *dev;
-
- DEBUG(1, "ray_attach()\n");
- /* Initialize the dev_link_t structure */
- link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
-
- if (!link)
- return -ENOMEM;
+ DEBUG(1, "ray_attach()\n");
/* Allocate space for private device-specific data */
dev = alloc_etherdev(sizeof(ray_dev_t));
-
if (!dev)
goto fail_alloc_dev;
local = dev->priv;
-
- memset(link, 0, sizeof(struct dev_link_t));
+ local->finder = p_dev;
/* The io structure describes IO port mapping. None used here */
- link->io.NumPorts1 = 0;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.IOAddrLines = 5;
+ p_dev->io.NumPorts1 = 0;
+ p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ p_dev->io.IOAddrLines = 5;
/* Interrupt setup. For PCMCIA, driver takes what's given */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
- link->irq.IRQInfo1 = IRQ_LEVEL_ID;
- link->irq.Handler = &ray_interrupt;
+ p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ p_dev->irq.Handler = &ray_interrupt;
/* General socket configuration */
- link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
- link->conf.IntType = INT_MEMORY_AND_IO;
- link->conf.ConfigIndex = 1;
- link->conf.Present = PRESENT_OPTION;
-
- link->priv = dev;
- link->irq.Instance = dev;
+ p_dev->conf.Attributes = CONF_ENABLE_IRQ;
+ p_dev->conf.IntType = INT_MEMORY_AND_IO;
+ p_dev->conf.ConfigIndex = 1;
+ p_dev->conf.Present = PRESENT_OPTION;
+
+ p_dev->priv = dev;
+ p_dev->irq.Instance = dev;
- local->finder = link;
+ local->finder = p_dev;
local->card_status = CARD_INSERTED;
local->authentication_state = UNAUTHENTICATED;
local->num_multi = 0;
- DEBUG(2,"ray_attach link = %p, dev = %p, local = %p, intr = %p\n",
- link,dev,local,&ray_interrupt);
+ DEBUG(2,"ray_attach p_dev = %p, dev = %p, local = %p, intr = %p\n",
+ p_dev,dev,local,&ray_interrupt);
/* Raylink entries in the device structure */
dev->hard_start_xmit = &ray_dev_start_xmit;
@@ -379,16 +366,10 @@ static int ray_attach(struct pcmcia_device *p_dev)
init_timer(&local->timer);
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- ray_config(link);
-
- return 0;
+ this_device = p_dev;
+ return ray_config(p_dev);
fail_alloc_dev:
- kfree(link);
return -ENOMEM;
} /* ray_attach */
/*=============================================================================
@@ -397,37 +378,25 @@ fail_alloc_dev:
structures are freed. Otherwise, the structures will be freed
when the device is released.
=============================================================================*/
-static void ray_detach(struct pcmcia_device *p_dev)
+static void ray_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
- dev_link_t **linkp;
struct net_device *dev;
ray_dev_t *local;
DEBUG(1, "ray_detach(0x%p)\n", link);
-
- /* Locate device structure */
- for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
- if (*linkp == link) break;
- if (*linkp == NULL)
- return;
+ this_device = NULL;
dev = link->priv;
- if (link->state & DEV_CONFIG) {
- ray_release(link);
+ ray_release(link);
- local = (ray_dev_t *)dev->priv;
- del_timer(&local->timer);
- }
+ local = (ray_dev_t *)dev->priv;
+ del_timer(&local->timer);
- /* Unlink device structure, free pieces */
- *linkp = link->next;
if (link->priv) {
- if (link->dev) unregister_netdev(dev);
+ if (link->dev_node) unregister_netdev(dev);
free_netdev(dev);
}
- kfree(link);
DEBUG(2,"ray_cs ray_detach ending\n");
} /* ray_detach */
/*=============================================================================
@@ -438,9 +407,8 @@ static void ray_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
#define MAX_TUPLE_SIZE 128
-static void ray_config(dev_link_t *link)
+static int ray_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
tuple_t tuple;
cisparse_t parse;
int last_fn = 0, last_ret = 0;
@@ -455,48 +423,45 @@ static void ray_config(dev_link_t *link)
/* This reads the card's CONFIG tuple to find its configuration regs */
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
tuple.TupleData = buf;
tuple.TupleDataMax = MAX_TUPLE_SIZE;
tuple.TupleOffset = 0;
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
/* Determine card type and firmware version */
buf[0] = buf[MAX_TUPLE_SIZE - 1] = 0;
tuple.DesiredTuple = CISTPL_VERS_1;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
tuple.TupleData = buf;
tuple.TupleDataMax = MAX_TUPLE_SIZE;
tuple.TupleOffset = 2;
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
for (i=0; i<tuple.TupleDataLen - 4; i++)
if (buf[i] == 0) buf[i] = ' ';
printk(KERN_INFO "ray_cs Detected: %s\n",buf);
- /* Configure card */
- link->state |= DEV_CONFIG;
-
/* Now allocate an interrupt line. Note that this does not
actually assign a handler to the interrupt.
*/
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
dev->irq = link->irq.AssignedIRQ;
/* This actually configures the PCMCIA socket -- setting up
the I/O windows and the interrupt mapping.
*/
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
/*** Set up 32k window for shared memory (transmit and control) ************/
req.Attributes = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT;
req.Base = 0;
req.Size = 0x8000;
req.AccessSpeed = ray_mem_speed;
- CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &link->win));
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win));
mem.CardOffset = 0x0000; mem.Page = 0;
CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem));
local->sram = ioremap(req.Base,req.Size);
@@ -506,7 +471,7 @@ static void ray_config(dev_link_t *link)
req.Base = 0;
req.Size = 0x4000;
req.AccessSpeed = ray_mem_speed;
- CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &local->rmem_handle));
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &local->rmem_handle));
mem.CardOffset = 0x8000; mem.Page = 0;
CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->rmem_handle, &mem));
local->rmem = ioremap(req.Base,req.Size);
@@ -516,7 +481,7 @@ static void ray_config(dev_link_t *link)
req.Base = 0;
req.Size = 0x1000;
req.AccessSpeed = ray_mem_speed;
- CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &local->amem_handle));
+ CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &local->amem_handle));
mem.CardOffset = 0x0000; mem.Page = 0;
CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->amem_handle, &mem));
local->amem = ioremap(req.Base,req.Size);
@@ -526,32 +491,32 @@ static void ray_config(dev_link_t *link)
DEBUG(3,"ray_config amem=%p\n",local->amem);
if (ray_init(dev) < 0) {
ray_release(link);
- return;
+ return -ENODEV;
}
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
i = register_netdev(dev);
if (i != 0) {
printk("ray_config register_netdev() failed\n");
ray_release(link);
- return;
+ return i;
}
strcpy(local->node.dev_name, dev->name);
- link->dev = &local->node;
+ link->dev_node = &local->node;
- link->state &= ~DEV_CONFIG_PENDING;
printk(KERN_INFO "%s: RayLink, irq %d, hw_addr ",
dev->name, dev->irq);
for (i = 0; i < 6; i++)
printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
ray_release(link);
+ return -ENODEV;
} /* ray_config */
static inline struct ccs __iomem *ccs_base(ray_dev_t *dev)
@@ -578,9 +543,9 @@ static int ray_init(struct net_device *dev)
UCHAR *p;
struct ccs __iomem *pccs;
ray_dev_t *local = (ray_dev_t *)dev->priv;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
DEBUG(1, "ray_init(0x%p)\n", dev);
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(0,"ray_init - device not present\n");
return -1;
}
@@ -640,10 +605,10 @@ static int dl_startup_params(struct net_device *dev)
int ccsindex;
ray_dev_t *local = (ray_dev_t *)dev->priv;
struct ccs __iomem *pccs;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
DEBUG(1,"dl_startup_params entered\n");
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_cs dl_startup_params - device not present\n");
return -1;
}
@@ -747,9 +712,9 @@ static void verify_dl_startup(u_long data)
ray_dev_t *local = (ray_dev_t *)data;
struct ccs __iomem *pccs = ccs_base(local) + local->dl_param_ccs;
UCHAR status;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_cs verify_dl_startup - device not present\n");
return;
}
@@ -787,8 +752,8 @@ static void start_net(u_long data)
ray_dev_t *local = (ray_dev_t *)data;
struct ccs __iomem *pccs;
int ccsindex;
- dev_link_t *link = local->finder;
- if (!(link->state & DEV_PRESENT)) {
+ struct pcmcia_device *link = local->finder;
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_cs start_net - device not present\n");
return;
}
@@ -814,9 +779,9 @@ static void join_net(u_long data)
struct ccs __iomem *pccs;
int ccsindex;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_cs join_net - device not present\n");
return;
}
@@ -840,7 +805,7 @@ static void join_net(u_long data)
device, and release the PCMCIA configuration. If the device is
still open, this will be postponed until it is closed.
=============================================================================*/
-static void ray_release(dev_link_t *link)
+static void ray_release(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
ray_dev_t *local = dev->priv;
@@ -849,56 +814,38 @@ static void ray_release(dev_link_t *link)
DEBUG(1, "ray_release(0x%p)\n", link);
del_timer(&local->timer);
- link->state &= ~DEV_CONFIG;
iounmap(local->sram);
iounmap(local->rmem);
iounmap(local->amem);
/* Do bother checking to see if these succeed or not */
- i = pcmcia_release_window(link->win);
- if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(link->win) ret = %x\n",i);
i = pcmcia_release_window(local->amem_handle);
if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(local->amem) ret = %x\n",i);
i = pcmcia_release_window(local->rmem_handle);
if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(local->rmem) ret = %x\n",i);
- i = pcmcia_release_configuration(link->handle);
- if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseConfiguration ret = %x\n",i);
- i = pcmcia_release_irq(link->handle, &link->irq);
- if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseIRQ ret = %x\n",i);
+ pcmcia_disable_device(link);
DEBUG(2,"ray_release ending\n");
}
-static int ray_suspend(struct pcmcia_device *p_dev)
+static int ray_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- if (link->open)
- netif_device_detach(dev);
-
- pcmcia_release_configuration(link->handle);
- }
-
+ if (link->open)
+ netif_device_detach(dev);
return 0;
}
-static int ray_resume(struct pcmcia_device *p_dev)
+static int ray_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- if (link->open) {
- ray_reset(dev);
- netif_device_attach(dev);
- }
- }
+ if (link->open) {
+ ray_reset(dev);
+ netif_device_attach(dev);
+ }
return 0;
}
@@ -910,10 +857,10 @@ int ray_dev_init(struct net_device *dev)
int i;
#endif /* RAY_IMMEDIATE_INIT */
ray_dev_t *local = dev->priv;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
DEBUG(1,"ray_dev_init(dev=%p)\n",dev);
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_dev_init - device not present\n");
return -1;
}
@@ -944,10 +891,10 @@ int ray_dev_init(struct net_device *dev)
static int ray_dev_config(struct net_device *dev, struct ifmap *map)
{
ray_dev_t *local = dev->priv;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
/* Dummy routine to satisfy device structure */
DEBUG(1,"ray_dev_config(dev=%p,ifmap=%p)\n",dev,map);
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_dev_config - device not present\n");
return -1;
}
@@ -958,10 +905,10 @@ static int ray_dev_config(struct net_device *dev, struct ifmap *map)
static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
ray_dev_t *local = dev->priv;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
short length = skb->len;
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_dev_start_xmit - device not present\n");
return -1;
}
@@ -1570,7 +1517,7 @@ static int ray_commit(struct net_device *dev,
static iw_stats * ray_get_wireless_stats(struct net_device * dev)
{
ray_dev_t * local = (ray_dev_t *) dev->priv;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
struct status __iomem *p = local->sram + STATUS_BASE;
if(local == (ray_dev_t *) NULL)
@@ -1588,7 +1535,7 @@ static iw_stats * ray_get_wireless_stats(struct net_device * dev)
}
#endif /* WIRELESS_SPY */
- if((link->state & DEV_PRESENT)) {
+ if(pcmcia_dev_present(link)) {
local->wstats.qual.noise = readb(&p->rxnoise);
local->wstats.qual.updated |= 4;
}
@@ -1657,18 +1604,14 @@ static const struct iw_handler_def ray_handler_def =
/*===========================================================================*/
static int ray_open(struct net_device *dev)
{
- dev_link_t *link;
ray_dev_t *local = (ray_dev_t *)dev->priv;
+ struct pcmcia_device *link;
+ link = local->finder;
DEBUG(1, "ray_open('%s')\n", dev->name);
- for (link = dev_list; link; link = link->next)
- if (link->priv == dev) break;
- if (!DEV_OK(link)) {
- return -ENODEV;
- }
-
- if (link->open == 0) local->num_multi = 0;
+ if (link->open == 0)
+ local->num_multi = 0;
link->open++;
/* If the card is not started, time to start it ! - Jean II */
@@ -1695,15 +1638,12 @@ static int ray_open(struct net_device *dev)
/*===========================================================================*/
static int ray_dev_close(struct net_device *dev)
{
- dev_link_t *link;
+ ray_dev_t *local = (ray_dev_t *)dev->priv;
+ struct pcmcia_device *link;
+ link = local->finder;
DEBUG(1, "ray_dev_close('%s')\n", dev->name);
- for (link = dev_list; link; link = link->next)
- if (link->priv == dev) break;
- if (link == NULL)
- return -ENODEV;
-
link->open--;
netif_stop_queue(dev);
@@ -1725,9 +1665,9 @@ static void ray_reset(struct net_device *dev) {
static int interrupt_ecf(ray_dev_t *local, int ccs)
{
int i = 50;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_cs interrupt_ecf - device not present\n");
return -1;
}
@@ -1752,9 +1692,9 @@ static int get_free_tx_ccs(ray_dev_t *local)
{
int i;
struct ccs __iomem *pccs = ccs_base(local);
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_cs get_free_tx_ccs - device not present\n");
return ECARDGONE;
}
@@ -1783,9 +1723,9 @@ static int get_free_ccs(ray_dev_t *local)
{
int i;
struct ccs __iomem *pccs = ccs_base(local);
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_cs get_free_ccs - device not present\n");
return ECARDGONE;
}
@@ -1858,9 +1798,9 @@ static int parse_addr(char *in_str, UCHAR *out)
static struct net_device_stats *ray_get_stats(struct net_device *dev)
{
ray_dev_t *local = (ray_dev_t *)dev->priv;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
struct status __iomem *p = local->sram + STATUS_BASE;
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_cs net_device_stats - device not present\n");
return &local->stats;
}
@@ -1888,12 +1828,12 @@ static struct net_device_stats *ray_get_stats(struct net_device *dev)
static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value, int len)
{
ray_dev_t *local = (ray_dev_t *)dev->priv;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
int ccsindex;
int i;
struct ccs __iomem *pccs;
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_update_parm - device not present\n");
return;
}
@@ -1925,10 +1865,10 @@ static void ray_update_multi_list(struct net_device *dev, int all)
struct ccs __iomem *pccs;
int i = 0;
ray_dev_t *local = (ray_dev_t *)dev->priv;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
void __iomem *p = local->sram + HOST_TO_ECF_BASE;
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_update_multi_list - device not present\n");
return;
}
@@ -2005,7 +1945,7 @@ static void set_multicast_list(struct net_device *dev)
static irqreturn_t ray_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
struct net_device *dev = (struct net_device *)dev_id;
- dev_link_t *link;
+ struct pcmcia_device *link;
ray_dev_t *local;
struct ccs __iomem *pccs;
struct rcs __iomem *prcs;
@@ -2020,8 +1960,8 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id, struct pt_regs * regs)
DEBUG(4,"ray_cs: interrupt for *dev=%p\n",dev);
local = (ray_dev_t *)dev->priv;
- link = (dev_link_t *)local->finder;
- if ( ! (link->state & DEV_PRESENT) || link->state & DEV_SUSPEND ) {
+ link = (struct pcmcia_device *)local->finder;
+ if (!pcmcia_dev_present(link)) {
DEBUG(2,"ray_cs interrupt from device not present or suspended.\n");
return IRQ_NONE;
}
@@ -2540,9 +2480,9 @@ static void release_frag_chain(ray_dev_t *local, struct rcs __iomem * prcs)
/*===========================================================================*/
static void authenticate(ray_dev_t *local)
{
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
DEBUG(0,"ray_cs Starting authentication.\n");
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_cs authenticate - device not present\n");
return;
}
@@ -2606,10 +2546,10 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
static void associate(ray_dev_t *local)
{
struct ccs __iomem *pccs;
- dev_link_t *link = local->finder;
+ struct pcmcia_device *link = local->finder;
struct net_device *dev = link->priv;
int ccsindex;
- if (!(link->state & DEV_PRESENT)) {
+ if (!(pcmcia_dev_present(link))) {
DEBUG(2,"ray_cs associate - device not present\n");
return;
}
@@ -2689,14 +2629,14 @@ static int ray_cs_proc_read(char *buf, char **start, off_t offset, int len)
* eg ifconfig
*/
int i;
- dev_link_t *link;
+ struct pcmcia_device *link;
struct net_device *dev;
ray_dev_t *local;
UCHAR *p;
struct freq_hop_element *pfh;
UCHAR c[33];
- link = dev_list;
+ link = this_device;
if (!link)
return 0;
dev = (struct net_device *)link->priv;
@@ -2898,7 +2838,7 @@ static struct pcmcia_driver ray_driver = {
.drv = {
.name = "ray_cs",
},
- .probe = ray_attach,
+ .probe = ray_probe,
.remove = ray_detach,
.id_table = ray_ids,
.suspend = ray_suspend,
@@ -2940,7 +2880,6 @@ static void __exit exit_ray_cs(void)
#endif
pcmcia_unregister_driver(&ray_driver);
- BUG_ON(dev_list != NULL);
} /* exit_ray_cs */
module_init(init_ray_cs);
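/*
 * With the dev_list linked list gone, ray_cs keeps one static
 * this_device pointer (hence the WARNING above about multiple devices):
 * it is set in ray_probe(), cleared in ray_detach(), and is all the
 * /proc read path needs.  A sketch of that lookup under the single-card
 * assumption; example_proc_read() is illustrative, not the driver's
 * actual proc handler.
 */
static struct pcmcia_device *this_device;   /* at most one card bound */

static int example_proc_read(char *buf)
{
    struct pcmcia_device *link = this_device;
    struct net_device *dev;

    if (!link)
        return 0;                   /* no card bound */
    dev = link->priv;
    /* ... format status from dev and dev->priv as before ... */
    return 0;
}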
diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h
index 42660fe64bfd9..bd73ebf033401 100644
--- a/drivers/net/wireless/ray_cs.h
+++ b/drivers/net/wireless/ray_cs.h
@@ -31,7 +31,7 @@ typedef struct ray_dev_t {
void __iomem *sram; /* pointer to beginning of shared RAM */
void __iomem *amem; /* pointer to attribute mem window */
void __iomem *rmem; /* pointer to receive buffer window */
- dev_link_t *finder; /* pointer back to dev_link_t for card */
+ struct pcmcia_device *finder; /* pointer back to struct pcmcia_device for card */
struct timer_list timer;
long tx_ccs_lock;
long ccs_lock;
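/*
 * The header change completes the ray_cs pattern: the private data keeps
 * a struct pcmcia_device *finder back-pointer, and every path that
 * touches the hardware now tests it with pcmcia_dev_present() instead of
 * peeking at link->state bits.  A representative guard, condensed from
 * the hunks above; example_hw_access() is illustrative only.
 */
static int example_hw_access(struct net_device *dev)
{
    ray_dev_t *local = dev->priv;
    struct pcmcia_device *link = local->finder;

    if (!pcmcia_dev_present(link)) {
        DEBUG(2, "ray_cs example - device not present\n");
        return -1;
    }
    /* ... safe to touch shared memory and CCS structures here ... */
    return 0;
}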
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 5fa6fbe35bb94..f7b77ce54d7be 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -63,7 +63,7 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket
/* PCMCIA specific device information (goes in the card field of
* struct orinoco_private */
struct orinoco_pccard {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
};
@@ -71,8 +71,8 @@ struct orinoco_pccard {
/* Function prototypes */
/********************************************************************/
-static void spectrum_cs_config(dev_link_t *link);
-static void spectrum_cs_release(dev_link_t *link);
+static int spectrum_cs_config(struct pcmcia_device *link);
+static void spectrum_cs_release(struct pcmcia_device *link);
/********************************************************************/
/* Firmware downloader */
@@ -238,14 +238,14 @@ spectrum_aux_open(hermes_t *hw)
* If IDLE is 1, stop the firmware, so that it can be safely rewritten.
*/
static int
-spectrum_reset(dev_link_t *link, int idle)
+spectrum_reset(struct pcmcia_device *link, int idle)
{
int last_ret, last_fn;
conf_reg_t reg;
u_int save_cor;
/* Doing it if hardware is gone is guaranteed crash */
- if (!(link->state & DEV_CONFIG))
+ if (!pcmcia_dev_present(link))
return -ENODEV;
/* Save original COR value */
@@ -253,7 +253,7 @@ spectrum_reset(dev_link_t *link, int idle)
reg.Action = CS_READ;
reg.Offset = CISREG_COR;
CS_CHECK(AccessConfigurationRegister,
- pcmcia_access_configuration_register(link->handle, &reg));
+ pcmcia_access_configuration_register(link, &reg));
save_cor = reg.Value;
/* Soft-Reset card */
@@ -261,14 +261,14 @@ spectrum_reset(dev_link_t *link, int idle)
reg.Offset = CISREG_COR;
reg.Value = (save_cor | COR_SOFT_RESET);
CS_CHECK(AccessConfigurationRegister,
- pcmcia_access_configuration_register(link->handle, &reg));
+ pcmcia_access_configuration_register(link, &reg));
udelay(1000);
/* Read CCSR */
reg.Action = CS_READ;
reg.Offset = CISREG_CCSR;
CS_CHECK(AccessConfigurationRegister,
- pcmcia_access_configuration_register(link->handle, &reg));
+ pcmcia_access_configuration_register(link, &reg));
/*
* Start or stop the firmware. Memory width bit should be
@@ -278,7 +278,7 @@ spectrum_reset(dev_link_t *link, int idle)
reg.Offset = CISREG_CCSR;
reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16);
CS_CHECK(AccessConfigurationRegister,
- pcmcia_access_configuration_register(link->handle, &reg));
+ pcmcia_access_configuration_register(link, &reg));
udelay(1000);
/* Restore original COR configuration index */
@@ -286,12 +286,12 @@ spectrum_reset(dev_link_t *link, int idle)
reg.Offset = CISREG_COR;
reg.Value = (save_cor & ~COR_SOFT_RESET);
CS_CHECK(AccessConfigurationRegister,
- pcmcia_access_configuration_register(link->handle, &reg));
+ pcmcia_access_configuration_register(link, &reg));
udelay(1000);
return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
return -ENODEV;
}
@@ -441,7 +441,7 @@ spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block)
* care of the PDA - read it and then write it on top of the firmware.
*/
static int
-spectrum_dl_image(hermes_t *hw, dev_link_t *link,
+spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link,
const unsigned char *image)
{
int ret;
@@ -505,14 +505,13 @@ spectrum_dl_image(hermes_t *hw, dev_link_t *link,
* reset on the card, to make sure it's in a sane state.
*/
static int
-spectrum_dl_firmware(hermes_t *hw, dev_link_t *link)
+spectrum_dl_firmware(hermes_t *hw, struct pcmcia_device *link)
{
int ret;
- client_handle_t handle = link->handle;
const struct firmware *fw_entry;
if (request_firmware(&fw_entry, primary_fw_name,
- &handle_to_dev(handle)) == 0) {
+ &handle_to_dev(link)) == 0) {
primsym = fw_entry->data;
} else {
printk(KERN_ERR PFX "Cannot find firmware: %s\n",
@@ -521,7 +520,7 @@ spectrum_dl_firmware(hermes_t *hw, dev_link_t *link)
}
if (request_firmware(&fw_entry, secondary_fw_name,
- &handle_to_dev(handle)) == 0) {
+ &handle_to_dev(link)) == 0) {
secsym = fw_entry->data;
} else {
printk(KERN_ERR PFX "Cannot find firmware: %s\n",
@@ -554,12 +553,12 @@ static int
spectrum_cs_hard_reset(struct orinoco_private *priv)
{
struct orinoco_pccard *card = priv->card;
- dev_link_t *link = &card->link;
+ struct pcmcia_device *link = card->p_dev;
int err;
if (!hermes_present(&priv->hw)) {
/* The firmware needs to be reloaded */
- if (spectrum_dl_firmware(&priv->hw, &card->link) != 0) {
+ if (spectrum_dl_firmware(&priv->hw, link) != 0) {
printk(KERN_ERR PFX "Firmware download failed\n");
err = -ENODEV;
}
@@ -584,12 +583,11 @@ spectrum_cs_hard_reset(struct orinoco_private *priv)
* configure the card at this point -- we wait until we receive a card
* insertion event. */
static int
-spectrum_cs_attach(struct pcmcia_device *p_dev)
+spectrum_cs_probe(struct pcmcia_device *link)
{
struct net_device *dev;
struct orinoco_private *priv;
struct orinoco_pccard *card;
- dev_link_t *link;
dev = alloc_orinocodev(sizeof(*card), spectrum_cs_hard_reset);
if (! dev)
@@ -598,7 +596,7 @@ spectrum_cs_attach(struct pcmcia_device *p_dev)
card = priv->card;
/* Link both structures together */
- link = &card->link;
+ card->p_dev = link;
link->priv = dev;
/* Interrupt setup */
@@ -615,13 +613,7 @@ spectrum_cs_attach(struct pcmcia_device *p_dev)
link->conf.Attributes = 0;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- spectrum_cs_config(link);
-
- return 0;
+ return spectrum_cs_config(link);
} /* spectrum_cs_attach */
/*
@@ -630,16 +622,14 @@ spectrum_cs_attach(struct pcmcia_device *p_dev)
* are freed. Otherwise, the structures will be freed when the device
* is released.
*/
-static void spectrum_cs_detach(struct pcmcia_device *p_dev)
+static void spectrum_cs_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- if (link->state & DEV_CONFIG)
- spectrum_cs_release(link);
+ spectrum_cs_release(link);
- DEBUG(0, PFX "detach: link=%p link->dev=%p\n", link, link->dev);
- if (link->dev) {
+ DEBUG(0, PFX "detach: link=%p link->dev_node=%p\n", link, link->dev_node);
+ if (link->dev_node) {
DEBUG(0, PFX "About to unregister net device %p\n",
dev);
unregister_netdev(dev);
@@ -653,11 +643,10 @@ static void spectrum_cs_detach(struct pcmcia_device *p_dev)
* device available to the system.
*/
-static void
-spectrum_cs_config(dev_link_t *link)
+static int
+spectrum_cs_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- client_handle_t handle = link->handle;
struct orinoco_private *priv = netdev_priv(dev);
struct orinoco_pccard *card = priv->card;
hermes_t *hw = &priv->hw;
@@ -669,7 +658,7 @@ spectrum_cs_config(dev_link_t *link)
cisparse_t parse;
void __iomem *mem;
- CS_CHECK(ValidateCIS, pcmcia_validate_cis(handle, &info));
+ CS_CHECK(ValidateCIS, pcmcia_validate_cis(link, &info));
/*
* This reads the card's CONFIG tuple to find its
@@ -680,19 +669,15 @@ spectrum_cs_config(dev_link_t *link)
tuple.TupleData = buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
-
/* Look up the current Vcc */
CS_CHECK(GetConfigurationInfo,
- pcmcia_get_configuration_info(handle, &conf));
- link->conf.Vcc = conf.Vcc;
+ pcmcia_get_configuration_info(link, &conf));
/*
* In this loop, we scan the CIS for configuration table
@@ -709,13 +694,13 @@ spectrum_cs_config(dev_link_t *link)
* implementation-defined details.
*/
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
cistpl_cftable_entry_t dflt = { .index = 0 };
- if ( (pcmcia_get_tuple_data(handle, &tuple) != 0)
- || (pcmcia_parse_tuple(handle, &tuple, &parse) != 0))
+ if ( (pcmcia_get_tuple_data(link, &tuple) != 0)
+ || (pcmcia_parse_tuple(link, &tuple, &parse) != 0))
goto next_entry;
if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
@@ -747,10 +732,10 @@ spectrum_cs_config(dev_link_t *link)
}
if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
/* Do we need to allocate an interrupt? */
@@ -780,7 +765,7 @@ spectrum_cs_config(dev_link_t *link)
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(link->handle, &link->io) != 0)
+ if (pcmcia_request_io(link, &link->io) != 0)
goto next_entry;
}
@@ -790,9 +775,8 @@ spectrum_cs_config(dev_link_t *link)
break;
next_entry:
- if (link->io.NumPorts1)
- pcmcia_release_io(link->handle, &link->io);
- last_ret = pcmcia_get_next_tuple(handle, &tuple);
+ pcmcia_disable_device(link);
+ last_ret = pcmcia_get_next_tuple(link, &tuple);
if (last_ret == CS_NO_MORE_ITEMS) {
printk(KERN_ERR PFX "GetNextTuple(): No matching "
"CIS configuration. Maybe you need the "
@@ -806,7 +790,7 @@ spectrum_cs_config(dev_link_t *link)
* a handler to the interrupt, unless the 'Handler' member of
* the irq structure is initialized.
*/
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
/* We initialize the hermes structure before completing PCMCIA
* configuration just in case the interrupt handler gets
@@ -823,7 +807,7 @@ spectrum_cs_config(dev_link_t *link)
* card and host interface into "Memory and IO" mode.
*/
CS_CHECK(RequestConfiguration,
- pcmcia_request_configuration(link->handle, &link->conf));
+ pcmcia_request_configuration(link, &link->conf));
/* Ok, we have the configuration, prepare to register the netdev */
dev->base_addr = link->io.BasePort1;
@@ -836,7 +820,7 @@ spectrum_cs_config(dev_link_t *link)
goto failed;
}
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
/* Tell the stack we exist */
if (register_netdev(dev) != 0) {
printk(KERN_ERR PFX "register_netdev() failed\n");
@@ -844,20 +828,18 @@ spectrum_cs_config(dev_link_t *link)
}
/* At this point, the dev_node_t structure(s) needs to be
- * initialized and arranged in a linked list at link->dev. */
+ * initialized and arranged in a linked list at link->dev_node. */
strcpy(card->node.dev_name, dev->name);
- link->dev = &card->node; /* link->dev being non-NULL is also
+ link->dev_node = &card->node; /* link->dev_node being non-NULL is also
used to indicate that the
net_device has been registered */
- link->state &= ~DEV_CONFIG_PENDING;
/* Finally, report what we've done */
- printk(KERN_DEBUG "%s: index 0x%02x: Vcc %d.%d",
- dev->name, link->conf.ConfigIndex,
- link->conf.Vcc / 10, link->conf.Vcc % 10);
- if (link->conf.Vpp1)
- printk(", Vpp %d.%d", link->conf.Vpp1 / 10,
- link->conf.Vpp1 % 10);
+ printk(KERN_DEBUG "%s: index 0x%02x: ",
+ dev->name, link->conf.ConfigIndex);
+ if (link->conf.Vpp)
+ printk(", Vpp %d.%d", link->conf.Vpp / 10,
+ link->conf.Vpp % 10);
printk(", irq %d", link->irq.AssignedIRQ);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
@@ -867,13 +849,14 @@ spectrum_cs_config(dev_link_t *link)
link->io.BasePort2 + link->io.NumPorts2 - 1);
printk("\n");
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
spectrum_cs_release(link);
+ return -ENODEV;
} /* spectrum_cs_config */
/*
@@ -882,7 +865,7 @@ spectrum_cs_config(dev_link_t *link)
* still open, this will be postponed until it is closed.
*/
static void
-spectrum_cs_release(dev_link_t *link)
+spectrum_cs_release(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
struct orinoco_private *priv = netdev_priv(dev);
@@ -894,64 +877,46 @@ spectrum_cs_release(dev_link_t *link)
priv->hw_unavailable++;
spin_unlock_irqrestore(&priv->lock, flags);
- /* Don't bother checking to see if these succeed or not */
- pcmcia_release_configuration(link->handle);
- if (link->io.NumPorts1)
- pcmcia_release_io(link->handle, &link->io);
- if (link->irq.AssignedIRQ)
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
if (priv->hw.iobase)
ioport_unmap(priv->hw.iobase);
} /* spectrum_cs_release */
static int
-spectrum_cs_suspend(struct pcmcia_device *p_dev)
+spectrum_cs_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
struct orinoco_private *priv = netdev_priv(dev);
unsigned long flags;
int err = 0;
- link->state |= DEV_SUSPEND;
/* Mark the device as stopped, to block IO until later */
- if (link->state & DEV_CONFIG) {
- spin_lock_irqsave(&priv->lock, flags);
-
- err = __orinoco_down(dev);
- if (err)
- printk(KERN_WARNING "%s: Error %d downing interface\n",
- dev->name, err);
+ spin_lock_irqsave(&priv->lock, flags);
- netif_device_detach(dev);
- priv->hw_unavailable++;
+ err = __orinoco_down(dev);
+ if (err)
+ printk(KERN_WARNING "%s: Error %d downing interface\n",
+ dev->name, err);
- spin_unlock_irqrestore(&priv->lock, flags);
+ netif_device_detach(dev);
+ priv->hw_unavailable++;
- pcmcia_release_configuration(link->handle);
- }
+ spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int
-spectrum_cs_resume(struct pcmcia_device *p_dev)
+spectrum_cs_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
struct orinoco_private *priv = netdev_priv(dev);
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- /* FIXME: should we double check that this is
- * the same card as we had before */
- pcmcia_request_configuration(link->handle, &link->conf);
- netif_device_attach(dev);
- priv->hw_unavailable--;
- schedule_work(&priv->reset_work);
- }
+ netif_device_attach(dev);
+ priv->hw_unavailable--;
+ schedule_work(&priv->reset_work);
+
return 0;
}
@@ -979,7 +944,7 @@ static struct pcmcia_driver orinoco_driver = {
.drv = {
.name = DRIVER_NAME,
},
- .probe = spectrum_cs_attach,
+ .probe = spectrum_cs_probe,
.remove = spectrum_cs_detach,
.suspend = spectrum_cs_suspend,
.resume = spectrum_cs_resume,
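The spectrum_cs hunks above show the conversion pattern this patch applies to every driver that follows: dev_link_t disappears, the callbacks take a struct pcmcia_device directly, config() returns an int, and the DEV_CONFIG/DEV_SUSPEND state bits plus the explicit release calls give way to core bookkeeping and pcmcia_disable_device(). A minimal skeleton of the resulting shape, using only calls visible in these hunks; the example_ names are hypothetical and the field values are illustrative:

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int example_config(struct pcmcia_device *link);	/* as in the config sketches below */

static int example_probe(struct pcmcia_device *link)
{
	/* no kzalloc'd dev_link_t: the core hands us the device itself */
	link->io.NumPorts1    = 16;
	link->io.Attributes1  = IO_DATA_PATH_WIDTH_8;
	link->irq.Attributes  = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
	link->irq.IRQInfo1    = IRQ_LEVEL_ID;
	link->conf.Attributes = CONF_ENABLE_IRQ;	/* conf.Vcc is gone */
	link->conf.IntType    = INT_MEMORY_AND_IO;

	/* configure right away; DEV_CONFIG_PENDING no longer exists */
	return example_config(link);
}

static void example_release(struct pcmcia_device *link)
{
	/* one call replaces release_configuration/release_io/release_irq */
	pcmcia_disable_device(link);
}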
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 98122f3a4bc27..f7724eb2fa7ee 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -1005,7 +1005,7 @@ static inline void
wv_82593_reconfig(struct net_device * dev)
{
net_local * lp = netdev_priv(dev);
- dev_link_t * link = lp->link;
+ struct pcmcia_device * link = lp->link;
unsigned long flags;
  /* Arm the flag, will be cleared in wv_82593_config() */
@@ -3744,16 +3744,16 @@ wv_pcmcia_reset(struct net_device * dev)
{
int i;
conf_reg_t reg = { 0, CS_READ, CISREG_COR, 0 };
- dev_link_t * link = ((net_local *)netdev_priv(dev))->link;
+ struct pcmcia_device * link = ((net_local *)netdev_priv(dev))->link;
#ifdef DEBUG_CONFIG_TRACE
printk(KERN_DEBUG "%s: ->wv_pcmcia_reset()\n", dev->name);
#endif
- i = pcmcia_access_configuration_register(link->handle, &reg);
+ i = pcmcia_access_configuration_register(link, &reg);
if(i != CS_SUCCESS)
{
- cs_error(link->handle, AccessConfigurationRegister, i);
+ cs_error(link, AccessConfigurationRegister, i);
return FALSE;
}
@@ -3764,19 +3764,19 @@ wv_pcmcia_reset(struct net_device * dev)
reg.Action = CS_WRITE;
reg.Value = reg.Value | COR_SW_RESET;
- i = pcmcia_access_configuration_register(link->handle, &reg);
+ i = pcmcia_access_configuration_register(link, &reg);
if(i != CS_SUCCESS)
{
- cs_error(link->handle, AccessConfigurationRegister, i);
+ cs_error(link, AccessConfigurationRegister, i);
return FALSE;
}
reg.Action = CS_WRITE;
reg.Value = COR_LEVEL_IRQ | COR_CONFIG;
- i = pcmcia_access_configuration_register(link->handle, &reg);
+ i = pcmcia_access_configuration_register(link, &reg);
if(i != CS_SUCCESS)
{
- cs_error(link->handle, AccessConfigurationRegister, i);
+ cs_error(link, AccessConfigurationRegister, i);
return FALSE;
}
@@ -3940,9 +3940,8 @@ wv_hw_reset(struct net_device * dev)
* (called by wavelan_event())
*/
static inline int
-wv_pcmcia_config(dev_link_t * link)
+wv_pcmcia_config(struct pcmcia_device * link)
{
- client_handle_t handle = link->handle;
tuple_t tuple;
cisparse_t parse;
struct net_device * dev = (struct net_device *) link->priv;
@@ -3965,16 +3964,16 @@ wv_pcmcia_config(dev_link_t * link)
{
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- i = pcmcia_get_first_tuple(handle, &tuple);
+ i = pcmcia_get_first_tuple(link, &tuple);
if(i != CS_SUCCESS)
break;
tuple.TupleData = (cisdata_t *)buf;
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
- i = pcmcia_get_tuple_data(handle, &tuple);
+ i = pcmcia_get_tuple_data(link, &tuple);
if(i != CS_SUCCESS)
break;
- i = pcmcia_parse_tuple(handle, &tuple, &parse);
+ i = pcmcia_parse_tuple(link, &tuple, &parse);
if(i != CS_SUCCESS)
break;
link->conf.ConfigBase = parse.config.base;
@@ -3983,19 +3982,16 @@ wv_pcmcia_config(dev_link_t * link)
while(0);
if(i != CS_SUCCESS)
{
- cs_error(link->handle, ParseTuple, i);
- link->state &= ~DEV_CONFIG_PENDING;
+ cs_error(link, ParseTuple, i);
return FALSE;
}
-
- /* Configure card */
- link->state |= DEV_CONFIG;
+
do
{
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if(i != CS_SUCCESS)
{
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
break;
}
@@ -4003,10 +3999,10 @@ wv_pcmcia_config(dev_link_t * link)
* Now allocate an interrupt line. Note that this does not
* actually assign a handler to the interrupt.
*/
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
if(i != CS_SUCCESS)
{
- cs_error(link->handle, RequestIRQ, i);
+ cs_error(link, RequestIRQ, i);
break;
}
@@ -4015,15 +4011,15 @@ wv_pcmcia_config(dev_link_t * link)
* the I/O windows and the interrupt mapping.
*/
link->conf.ConfigIndex = 1;
- i = pcmcia_request_configuration(link->handle, &link->conf);
+ i = pcmcia_request_configuration(link, &link->conf);
if(i != CS_SUCCESS)
{
- cs_error(link->handle, RequestConfiguration, i);
+ cs_error(link, RequestConfiguration, i);
break;
}
/*
- * Allocate a small memory window. Note that the dev_link_t
+ * Allocate a small memory window. Note that the struct pcmcia_device
* structure provides space for one window handle -- if your
* device needs several windows, you'll need to keep track of
* the handles in your private data structure, link->priv.
@@ -4031,10 +4027,10 @@ wv_pcmcia_config(dev_link_t * link)
req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
req.Base = req.Size = 0;
req.AccessSpeed = mem_speed;
- i = pcmcia_request_window(&link->handle, &req, &link->win);
+ i = pcmcia_request_window(&link, &req, &link->win);
if(i != CS_SUCCESS)
{
- cs_error(link->handle, RequestWindow, i);
+ cs_error(link, RequestWindow, i);
break;
}
@@ -4046,7 +4042,7 @@ wv_pcmcia_config(dev_link_t * link)
i = pcmcia_map_mem_page(link->win, &mem);
if(i != CS_SUCCESS)
{
- cs_error(link->handle, MapMemPage, i);
+ cs_error(link, MapMemPage, i);
break;
}
@@ -4060,7 +4056,7 @@ wv_pcmcia_config(dev_link_t * link)
lp->mem, dev->irq, (u_int) dev->base_addr);
#endif
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
i = register_netdev(dev);
if(i != 0)
{
@@ -4072,7 +4068,6 @@ wv_pcmcia_config(dev_link_t * link)
}
while(0); /* Humm... Disguised goto !!! */
- link->state &= ~DEV_CONFIG_PENDING;
/* If any step failed, release any partially configured state */
if(i != 0)
{
@@ -4081,7 +4076,7 @@ wv_pcmcia_config(dev_link_t * link)
}
strcpy(((net_local *) netdev_priv(dev))->node.dev_name, dev->name);
- link->dev = &((net_local *) netdev_priv(dev))->node;
+ link->dev_node = &((net_local *) netdev_priv(dev))->node;
#ifdef DEBUG_CONFIG_TRACE
printk(KERN_DEBUG "<-wv_pcmcia_config()\n");
@@ -4096,26 +4091,20 @@ wv_pcmcia_config(dev_link_t * link)
* still open, this will be postponed until it is closed.
*/
static void
-wv_pcmcia_release(dev_link_t *link)
+wv_pcmcia_release(struct pcmcia_device *link)
{
- struct net_device * dev = (struct net_device *) link->priv;
- net_local * lp = netdev_priv(dev);
+ struct net_device * dev = (struct net_device *) link->priv;
+ net_local * lp = netdev_priv(dev);
#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: -> wv_pcmcia_release(0x%p)\n", dev->name, link);
+ printk(KERN_DEBUG "%s: -> wv_pcmcia_release(0x%p)\n", dev->name, link);
#endif
- /* Don't bother checking to see if these succeed or not */
- iounmap(lp->mem);
- pcmcia_release_window(link->win);
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
+ iounmap(lp->mem);
+ pcmcia_disable_device(link);
#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <- wv_pcmcia_release()\n", dev->name);
+ printk(KERN_DEBUG "%s: <- wv_pcmcia_release()\n", dev->name);
#endif
}
@@ -4479,7 +4468,7 @@ static int
wavelan_open(struct net_device * dev)
{
net_local * lp = netdev_priv(dev);
- dev_link_t * link = lp->link;
+ struct pcmcia_device * link = lp->link;
kio_addr_t base = dev->base_addr;
#ifdef DEBUG_CALLBACK_TRACE
@@ -4533,7 +4522,7 @@ wavelan_open(struct net_device * dev)
static int
wavelan_close(struct net_device * dev)
{
- dev_link_t * link = ((net_local *)netdev_priv(dev))->link;
+ struct pcmcia_device * link = ((net_local *)netdev_priv(dev))->link;
kio_addr_t base = dev->base_addr;
#ifdef DEBUG_CALLBACK_TRACE
@@ -4587,45 +4576,36 @@ wavelan_close(struct net_device * dev)
* card insertion event.
*/
static int
-wavelan_attach(struct pcmcia_device *p_dev)
+wavelan_probe(struct pcmcia_device *p_dev)
{
- dev_link_t * link; /* Info for cardmgr */
struct net_device * dev; /* Interface generic data */
net_local * lp; /* Interface specific data */
+ int ret;
#ifdef DEBUG_CALLBACK_TRACE
printk(KERN_DEBUG "-> wavelan_attach()\n");
#endif
- /* Initialize the dev_link_t structure */
- link = kzalloc(sizeof(struct dev_link_t), GFP_KERNEL);
- if (!link) return -ENOMEM;
-
/* The io structure describes IO port mapping */
- link->io.NumPorts1 = 8;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.IOAddrLines = 3;
+ p_dev->io.NumPorts1 = 8;
+ p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ p_dev->io.IOAddrLines = 3;
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
- link->irq.IRQInfo1 = IRQ_LEVEL_ID;
- link->irq.Handler = wavelan_interrupt;
+ p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ p_dev->irq.Handler = wavelan_interrupt;
/* General socket configuration */
- link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
- link->conf.IntType = INT_MEMORY_AND_IO;
-
- /* Chain drivers */
- link->next = NULL;
+ p_dev->conf.Attributes = CONF_ENABLE_IRQ;
+ p_dev->conf.IntType = INT_MEMORY_AND_IO;
/* Allocate the generic data structure */
dev = alloc_etherdev(sizeof(net_local));
- if (!dev) {
- kfree(link);
+ if (!dev)
return -ENOMEM;
- }
- link->priv = link->irq.Instance = dev;
+
+ p_dev->priv = p_dev->irq.Instance = dev;
lp = netdev_priv(dev);
@@ -4642,7 +4622,6 @@ wavelan_attach(struct pcmcia_device *p_dev)
spin_lock_init(&lp->spinlock);
/* back links */
- lp->link = link;
lp->dev = dev;
/* wavelan NET3 callbacks */
@@ -4668,15 +4647,18 @@ wavelan_attach(struct pcmcia_device *p_dev)
/* Other specific data */
dev->mtu = WAVELAN_MTU;
- link->handle = p_dev;
- p_dev->instance = link;
+ ret = wv_pcmcia_config(p_dev);
+ if (ret)
+ return ret;
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- if(wv_pcmcia_config(link) &&
- wv_hw_config(dev))
- wv_init_info(dev);
- else
+ ret = wv_hw_config(dev);
+ if (ret) {
dev->irq = 0;
+ pcmcia_disable_device(p_dev);
+ return ret;
+ }
+
+ wv_init_info(dev);
#ifdef DEBUG_CALLBACK_TRACE
printk(KERN_DEBUG "<- wavelan_attach()\n");
@@ -4693,25 +4675,14 @@ wavelan_attach(struct pcmcia_device *p_dev)
* is released.
*/
static void
-wavelan_detach(struct pcmcia_device *p_dev)
+wavelan_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
#ifdef DEBUG_CALLBACK_TRACE
printk(KERN_DEBUG "-> wavelan_detach(0x%p)\n", link);
#endif
- /*
- * If the device is currently configured and active, we won't
- * actually delete it yet. Instead, it is marked so that when the
- * release() function is called, that will trigger a proper
- * detach().
- */
- if(link->state & DEV_CONFIG)
- {
- /* Some others haven't done their job : give them another chance */
- wv_pcmcia_release(link);
- }
+ /* Some others haven't done their job : give them another chance */
+ wv_pcmcia_release(link);
/* Free pieces */
if(link->priv)
@@ -4720,23 +4691,21 @@ wavelan_detach(struct pcmcia_device *p_dev)
/* Remove ourselves from the kernel list of ethernet devices */
/* Warning : can't be called from interrupt, timer or wavelan_close() */
- if (link->dev)
+ if (link->dev_node)
unregister_netdev(dev);
- link->dev = NULL;
+ link->dev_node = NULL;
((net_local *)netdev_priv(dev))->link = NULL;
((net_local *)netdev_priv(dev))->dev = NULL;
free_netdev(dev);
}
- kfree(link);
#ifdef DEBUG_CALLBACK_TRACE
printk(KERN_DEBUG "<- wavelan_detach()\n");
#endif
}
-static int wavelan_suspend(struct pcmcia_device *p_dev)
+static int wavelan_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device * dev = (struct net_device *) link->priv;
/* NB: wavelan_close will be called, but too late, so we are
@@ -4748,36 +4717,22 @@ static int wavelan_suspend(struct pcmcia_device *p_dev)
/* Stop receiving new messages and wait end of transmission */
wv_ru_stop(dev);
+ if (link->open)
+ netif_device_detach(dev);
+
/* Power down the module */
hacr_write(dev->base_addr, HACR_DEFAULT & (~HACR_PWR_STAT));
- /* The card is now suspended */
- link->state |= DEV_SUSPEND;
-
- if(link->state & DEV_CONFIG)
- {
- if(link->open)
- netif_device_detach(dev);
- pcmcia_release_configuration(link->handle);
- }
-
return 0;
}
-static int wavelan_resume(struct pcmcia_device *p_dev)
+static int wavelan_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device * dev = (struct net_device *) link->priv;
- link->state &= ~DEV_SUSPEND;
- if(link->state & DEV_CONFIG)
- {
- pcmcia_request_configuration(link->handle, &link->conf);
- if(link->open) /* If RESET -> True, If RESUME -> False ? */
- {
- wv_hw_reset(dev);
- netif_device_attach(dev);
- }
+ if (link->open) {
+ wv_hw_reset(dev);
+ netif_device_attach(dev);
}
return 0;
@@ -4798,7 +4753,7 @@ static struct pcmcia_driver wavelan_driver = {
.drv = {
.name = "wavelan_cs",
},
- .probe = wavelan_attach,
+ .probe = wavelan_probe,
.remove = wavelan_detach,
.id_table = wavelan_ids,
.suspend = wavelan_suspend,
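With the suspend/resume bookkeeping moved into the PCMCIA core, the wavelan handlers above are reduced to network-device work: the core now releases and re-requests the socket configuration around them. A sketch of that reduced shape, assuming a driver-private reset helper (example_hw_reset is hypothetical):

static void example_hw_reset(struct net_device *dev);	/* driver-specific reinit */

static int example_suspend(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	/* the core releases the socket configuration; only park the netdev */
	if (link->open)
		netif_device_detach(dev);

	return 0;
}

static int example_resume(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	/* the core has already re-requested the configuration */
	if (link->open) {
		example_hw_reset(dev);
		netif_device_attach(dev);
	}

	return 0;
}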
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
index 451f6271dcbcf..c65fe7a391ecf 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -602,7 +602,7 @@ struct net_local
dev_node_t node; /* ???? What is this stuff ???? */
struct net_device * dev; /* Reverse link... */
spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
- dev_link_t * link; /* pcmcia structure */
+ struct pcmcia_device * link; /* pcmcia structure */
en_stats stats; /* Ethernet interface statistics */
int nresets; /* Number of hw resets */
u_char configured; /* If it is configured */
@@ -733,9 +733,9 @@ static int
static inline void
wv_hw_reset(struct net_device *); /* Same, + start receiver unit */
static inline int
- wv_pcmcia_config(dev_link_t *); /* Configure the pcmcia interface */
+ wv_pcmcia_config(struct pcmcia_device *); /* Configure the pcmcia interface */
static void
- wv_pcmcia_release(dev_link_t *);/* Remove a device */
+ wv_pcmcia_release(struct pcmcia_device *);/* Remove a device */
/* ---------------------- INTERRUPT HANDLING ---------------------- */
static irqreturn_t
wavelan_interrupt(int, /* Interrupt handler */
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 4303c50c2ab61..65ceb088f7000 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -611,5 +611,6 @@ struct wl3501_card {
struct iw_spy_data spy_data;
struct iw_public_data wireless_data;
struct dev_node_t node;
+ struct pcmcia_device *p_dev;
};
#endif
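The one-line wl3501.h change is what makes the wl3501_cs.c cleanup below possible: each card keeps a back-pointer to its own struct pcmcia_device, so open/close no longer walk a driver-global list. The pattern in isolation, with hypothetical names:

struct example_priv {
	struct pcmcia_device *p_dev;	/* back-pointer, set at probe time */
	/* ... other driver state ... */
};

static int example_open(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	struct pcmcia_device *link = priv->p_dev;

	/* replaces scanning a driver-global list for the matching device */
	if (!pcmcia_dev_present(link))
		return -ENODEV;

	link->open++;
	netif_start_queue(dev);
	return 0;
}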
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 48e10b0c7e747..e52a650f6737a 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -103,8 +103,8 @@ module_param(pc_debug, int, 0);
* release a socket, in response to card insertion and ejection events. They
* are invoked from the wl24 event handler.
*/
-static void wl3501_config(dev_link_t *link);
-static void wl3501_release(dev_link_t *link);
+static int wl3501_config(struct pcmcia_device *link);
+static void wl3501_release(struct pcmcia_device *link);
/*
* The dev_info variable is the "key" that is used to match up this
@@ -226,17 +226,6 @@ static void iw_copy_mgmt_info_element(struct iw_mgmt_info_element *to,
iw_set_mgmt_info_element(from->id, to, from->data, from->len);
}
-/*
- * A linked list of "instances" of the wl24 device. Each actual PCMCIA card
- * corresponds to one device instance, and is described by one dev_link_t
- * structure (defined in ds.h).
- *
- * You may not want to use a linked list for this -- for example, the memory
- * card driver uses an array of dev_link_t pointers, where minor device numbers
- * are used to derive the corresponding array index.
- */
-static dev_link_t *wl3501_dev_list;
-
static inline void wl3501_switch_page(struct wl3501_card *this, u8 page)
{
wl3501_outb(page, this->base_addr + WL3501_NIC_BSS);
@@ -1281,15 +1270,10 @@ static int wl3501_close(struct net_device *dev)
struct wl3501_card *this = dev->priv;
int rc = -ENODEV;
unsigned long flags;
- dev_link_t *link;
+ struct pcmcia_device *link;
+ link = this->p_dev;
spin_lock_irqsave(&this->lock, flags);
- /* Check if the device is in wl3501_dev_list */
- for (link = wl3501_dev_list; link; link = link->next)
- if (link->priv == dev)
- break;
- if (!link)
- goto out;
link->open--;
/* Stop wl3501_hard_start_xmit() from now on */
@@ -1301,7 +1285,6 @@ static int wl3501_close(struct net_device *dev)
rc = 0;
printk(KERN_INFO "%s: WL3501 closed\n", dev->name);
-out:
spin_unlock_irqrestore(&this->lock, flags);
return rc;
}
@@ -1400,14 +1383,11 @@ static int wl3501_open(struct net_device *dev)
int rc = -ENODEV;
struct wl3501_card *this = dev->priv;
unsigned long flags;
- dev_link_t *link;
+ struct pcmcia_device *link;
+ link = this->p_dev;
spin_lock_irqsave(&this->lock, flags);
- /* Check if the device is in wl3501_dev_list */
- for (link = wl3501_dev_list; link; link = link->next)
- if (link->priv == dev)
- break;
- if (!DEV_OK(link))
+ if (!pcmcia_dev_present(link))
goto out;
netif_device_attach(dev);
link->open++;
@@ -1497,38 +1477,23 @@ static struct ethtool_ops ops = {
* Services. If it has been released, all local data structures are freed.
* Otherwise, the structures will be freed when the device is released.
*/
-static void wl3501_detach(struct pcmcia_device *p_dev)
+static void wl3501_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
- dev_link_t **linkp;
struct net_device *dev = link->priv;
- /* Locate device structure */
- for (linkp = &wl3501_dev_list; *linkp; linkp = &(*linkp)->next)
- if (*linkp == link)
- break;
- if (!*linkp)
- goto out;
-
/* If the device is currently configured and active, we won't actually
* delete it yet. Instead, it is marked so that when the release()
* function is called, that will trigger a proper detach(). */
- if (link->state & DEV_CONFIG) {
- while (link->open > 0)
- wl3501_close(dev);
-
- netif_device_detach(dev);
- wl3501_release(link);
- }
+ while (link->open > 0)
+ wl3501_close(dev);
- /* Unlink device structure, free pieces */
- *linkp = link->next;
+ netif_device_detach(dev);
+ wl3501_release(link);
if (link->priv)
free_netdev(link->priv);
- kfree(link);
-out:
+
return;
}
@@ -1953,33 +1918,26 @@ static const struct iw_handler_def wl3501_handler_def = {
* The dev_link structure is initialized, but we don't actually configure the
* card at this point -- we wait until we receive a card insertion event.
*/
-static int wl3501_attach(struct pcmcia_device *p_dev)
+static int wl3501_probe(struct pcmcia_device *p_dev)
{
- dev_link_t *link;
struct net_device *dev;
struct wl3501_card *this;
- /* Initialize the dev_link_t structure */
- link = kzalloc(sizeof(*link), GFP_KERNEL);
- if (!link)
- return -ENOMEM;
-
/* The io structure describes IO port mapping */
- link->io.NumPorts1 = 16;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.IOAddrLines = 5;
+ p_dev->io.NumPorts1 = 16;
+ p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ p_dev->io.IOAddrLines = 5;
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
- link->irq.IRQInfo1 = IRQ_LEVEL_ID;
- link->irq.Handler = wl3501_interrupt;
+ p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ p_dev->irq.Handler = wl3501_interrupt;
/* General socket configuration */
- link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
- link->conf.IntType = INT_MEMORY_AND_IO;
- link->conf.ConfigIndex = 1;
- link->conf.Present = PRESENT_OPTION;
+ p_dev->conf.Attributes = CONF_ENABLE_IRQ;
+ p_dev->conf.IntType = INT_MEMORY_AND_IO;
+ p_dev->conf.ConfigIndex = 1;
+ p_dev->conf.Present = PRESENT_OPTION;
dev = alloc_etherdev(sizeof(struct wl3501_card));
if (!dev)
@@ -1992,22 +1950,15 @@ static int wl3501_attach(struct pcmcia_device *p_dev)
dev->get_stats = wl3501_get_stats;
this = dev->priv;
this->wireless_data.spy_data = &this->spy_data;
+ this->p_dev = p_dev;
dev->wireless_data = &this->wireless_data;
dev->wireless_handlers = (struct iw_handler_def *)&wl3501_handler_def;
SET_ETHTOOL_OPS(dev, &ops);
netif_stop_queue(dev);
- link->priv = link->irq.Instance = dev;
-
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- wl3501_config(link);
+ p_dev->priv = p_dev->irq.Instance = dev;
- return 0;
+ return wl3501_config(p_dev);
out_link:
- kfree(link);
- link = NULL;
return -ENOMEM;
}
@@ -2022,11 +1973,10 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
* received, to configure the PCMCIA socket, and to make the ethernet device
* available to the system.
*/
-static void wl3501_config(dev_link_t *link)
+static int wl3501_config(struct pcmcia_device *link)
{
tuple_t tuple;
cisparse_t parse;
- client_handle_t handle = link->handle;
struct net_device *dev = link->priv;
int i = 0, j, last_fn, last_ret;
unsigned char bf[64];
@@ -2035,18 +1985,15 @@ static void wl3501_config(dev_link_t *link)
/* This reads the card's CONFIG tuple to find its config registers. */
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
tuple.TupleData = bf;
tuple.TupleDataMax = sizeof(bf);
tuple.TupleOffset = 0;
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
-
/* Try allocating IO ports. This tries a few fixed addresses. If you
* want, you can also read the card's config table to pick addresses --
* see the serial driver for an example. */
@@ -2056,28 +2003,28 @@ static void wl3501_config(dev_link_t *link)
* 0x200-0x2ff, and so on, because this seems safer */
link->io.BasePort1 = j;
link->io.BasePort2 = link->io.BasePort1 + 0x10;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS)
break;
}
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
goto failed;
}
/* Now allocate an interrupt line. Note that this does not actually
* assign a handler to the interrupt. */
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
/* This actually configures the PCMCIA socket -- setting up the I/O
* windows and the interrupt mapping. */
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
- SET_NETDEV_DEV(dev, &handle_to_dev(handle));
+ SET_NETDEV_DEV(dev, &handle_to_dev(link));
if (register_netdev(dev)) {
printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n");
goto failed;
@@ -2088,10 +2035,9 @@ static void wl3501_config(dev_link_t *link)
this = dev->priv;
/*
* At this point, the dev_node_t structure(s) should be initialized and
- * arranged in a linked list at link->dev.
+ * arranged in a linked list at link->dev_node.
*/
- link->dev = &this->node;
- link->state &= ~DEV_CONFIG_PENDING;
+ link->dev_node = &this->node;
this->base_addr = dev->base_addr;
@@ -2127,13 +2073,13 @@ static void wl3501_config(dev_link_t *link)
spin_lock_init(&this->lock);
init_waitqueue_head(&this->wait);
netif_start_queue(dev);
- goto out;
+ return 0;
+
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
wl3501_release(link);
-out:
- return;
+ return -ENODEV;
}
/**
@@ -2144,52 +2090,36 @@ out:
* and release the PCMCIA configuration. If the device is still open, this
* will be postponed until it is closed.
*/
-static void wl3501_release(dev_link_t *link)
+static void wl3501_release(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
/* Unlink the device chain */
- if (link->dev) {
+ if (link->dev_node)
unregister_netdev(dev);
- link->dev = NULL;
- }
- /* Don't bother checking to see if these succeed or not */
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
}
-static int wl3501_suspend(struct pcmcia_device *p_dev)
+static int wl3501_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
- link->state |= DEV_SUSPEND;
-
wl3501_pwr_mgmt(dev->priv, WL3501_SUSPEND);
- if (link->state & DEV_CONFIG) {
- if (link->open)
- netif_device_detach(dev);
- pcmcia_release_configuration(link->handle);
- }
+ if (link->open)
+ netif_device_detach(dev);
return 0;
}
-static int wl3501_resume(struct pcmcia_device *p_dev)
+static int wl3501_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct net_device *dev = link->priv;
wl3501_pwr_mgmt(dev->priv, WL3501_RESUME);
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- if (link->open) {
- wl3501_reset(dev);
- netif_device_attach(dev);
- }
+ if (link->open) {
+ wl3501_reset(dev);
+ netif_device_attach(dev);
}
return 0;
@@ -2207,7 +2137,7 @@ static struct pcmcia_driver wl3501_driver = {
.drv = {
.name = "wl3501_cs",
},
- .probe = wl3501_attach,
+ .probe = wl3501_probe,
.remove = wl3501_detach,
.id_table = wl3501_ids,
.suspend = wl3501_suspend,
@@ -2221,9 +2151,7 @@ static int __init wl3501_init_module(void)
static void __exit wl3501_exit_module(void)
{
- dprintk(0, ": unloading");
pcmcia_unregister_driver(&wl3501_driver);
- BUG_ON(wl3501_dev_list != NULL);
}
module_init(wl3501_init_module);
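wl3501_config() above keeps the drivers' usual CS_CHECK() helper but, like the other converted config routines in this patch, now reports failure with -ENODEV so the probe path can propagate it. Condensed to the error-handling skeleton alone (example_ names are hypothetical; cs_error() takes the device directly after this patch):

#define CS_CHECK(fn, ret) \
	do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

static void example_release(struct pcmcia_device *link);

static int example_config(struct pcmcia_device *link)
{
	int last_fn, last_ret;

	CS_CHECK(RequestIRQ,
		 pcmcia_request_irq(link, &link->irq));
	CS_CHECK(RequestConfiguration,
		 pcmcia_request_configuration(link, &link->conf));
	return 0;

cs_failed:
	cs_error(link, last_fn, last_ret);	/* report which Card Services call failed */
	example_release(link);			/* undo any partial setup */
	return -ENODEV;				/* probe() propagates this */
}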
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 75d56bfef0ee7..fd0f43b7db5b0 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1441,8 +1441,7 @@ static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct yellowfin_private *np;
- if (!dev)
- BUG();
+ BUG_ON(!dev);
np = netdev_priv(dev);
pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index 158d925632591..b953d5907c05a 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -81,15 +81,15 @@ static char *version =
#define FORCE_EPP_MODE 0x08
typedef struct parport_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
int ndev;
dev_node_t node;
struct parport *port;
} parport_info_t;
static void parport_detach(struct pcmcia_device *p_dev);
-static void parport_config(dev_link_t *link);
-static void parport_cs_release(dev_link_t *);
+static int parport_config(struct pcmcia_device *link);
+static void parport_cs_release(struct pcmcia_device *);
/*======================================================================
@@ -99,10 +99,9 @@ static void parport_cs_release(dev_link_t *);
======================================================================*/
-static int parport_attach(struct pcmcia_device *p_dev)
+static int parport_probe(struct pcmcia_device *link)
{
parport_info_t *info;
- dev_link_t *link;
DEBUG(0, "parport_attach()\n");
@@ -110,23 +109,17 @@ static int parport_attach(struct pcmcia_device *p_dev)
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) return -ENOMEM;
memset(info, 0, sizeof(*info));
- link = &info->link; link->priv = info;
+ link->priv = info;
+ info->p_dev = link;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- parport_config(link);
-
- return 0;
+ return parport_config(link);
} /* parport_attach */
/*======================================================================
@@ -138,14 +131,11 @@ static int parport_attach(struct pcmcia_device *p_dev)
======================================================================*/
-static void parport_detach(struct pcmcia_device *p_dev)
+static void parport_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
DEBUG(0, "parport_detach(0x%p)\n", link);
- if (link->state & DEV_CONFIG)
- parport_cs_release(link);
+ parport_cs_release(link);
kfree(link->priv);
} /* parport_detach */
@@ -161,14 +151,12 @@ static void parport_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-void parport_config(dev_link_t *link)
+static int parport_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
parport_info_t *info = link->priv;
tuple_t tuple;
u_short buf[128];
cisparse_t parse;
- config_info_t conf;
cistpl_cftable_entry_t *cfg = &parse.cftable_entry;
cistpl_cftable_entry_t dflt = { 0 };
struct parport *p;
@@ -180,24 +168,18 @@ void parport_config(dev_link_t *link)
tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
-
- /* Configure card */
- link->state |= DEV_CONFIG;
- /* Not sure if this is right... look up the current Vcc */
- CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
-
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
tuple.Attributes = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
- if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
- pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+ if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
+ pcmcia_parse_tuple(link, &tuple, &parse) != 0)
goto next_entry;
if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
@@ -212,7 +194,7 @@ void parport_config(dev_link_t *link)
link->io.BasePort2 = io->win[1].base;
link->io.NumPorts2 = io->win[1].len;
}
- if (pcmcia_request_io(link->handle, &link->io) != 0)
+ if (pcmcia_request_io(link, &link->io) != 0)
goto next_entry;
/* If we've got this far, we're done */
break;
@@ -220,15 +202,12 @@ void parport_config(dev_link_t *link)
next_entry:
if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
- CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
}
- CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
- release_region(link->io.BasePort1, link->io.NumPorts1);
- if (link->io.NumPorts2)
- release_region(link->io.BasePort2, link->io.NumPorts2);
p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2,
link->irq.AssignedIRQ, PARPORT_DMA_NONE,
NULL);
@@ -247,17 +226,15 @@ void parport_config(dev_link_t *link)
info->node.minor = p->number;
info->port = p;
strcpy(info->node.dev_name, p->name);
- link->dev = &info->node;
+ link->dev_node = &info->node;
+
+ return 0;
- link->state &= ~DEV_CONFIG_PENDING;
- return;
-
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
parport_cs_release(link);
- link->state &= ~DEV_CONFIG_PENDING;
-
+ return -ENODEV;
} /* parport_config */
/*======================================================================
@@ -268,53 +245,21 @@ failed:
======================================================================*/
-void parport_cs_release(dev_link_t *link)
-{
- parport_info_t *info = link->priv;
-
- DEBUG(0, "parport_release(0x%p)\n", link);
-
- if (info->ndev) {
- struct parport *p = info->port;
- parport_pc_unregister_port(p);
- request_region(link->io.BasePort1, link->io.NumPorts1,
- info->node.dev_name);
- if (link->io.NumPorts2)
- request_region(link->io.BasePort2, link->io.NumPorts2,
- info->node.dev_name);
- }
- info->ndev = 0;
- link->dev = NULL;
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
-
-} /* parport_cs_release */
-
-static int parport_suspend(struct pcmcia_device *dev)
+void parport_cs_release(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
+ parport_info_t *info = link->priv;
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
+ DEBUG(0, "parport_release(0x%p)\n", link);
- return 0;
-}
-
-static int parport_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
+ if (info->ndev) {
+ struct parport *p = info->port;
+ parport_pc_unregister_port(p);
+ }
+ info->ndev = 0;
- link->state &= ~DEV_SUSPEND;
- if (DEV_OK(link))
- pcmcia_request_configuration(link->handle, &link->conf);
+ pcmcia_disable_device(link);
+} /* parport_cs_release */
- return 0;
-}
static struct pcmcia_device_id parport_ids[] = {
PCMCIA_DEVICE_FUNC_ID(3),
@@ -328,11 +273,9 @@ static struct pcmcia_driver parport_cs_driver = {
.drv = {
.name = "parport_cs",
},
- .probe = parport_attach,
+ .probe = parport_probe,
.remove = parport_detach,
.id_table = parport_ids,
- .suspend = parport_suspend,
- .resume = parport_resume,
};
static int __init init_parport_cs(void)
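parport_config() follows the same CIS walk as spectrum_cs_config(): read the CONFIG tuple to locate the configuration registers, then iterate CFTABLE_ENTRY tuples until an I/O window can be reserved. Stripped to that skeleton, reusing the CS_CHECK()/cs_error() helpers as defined in these drivers (example_scan_cis is a hypothetical name, and the cftable parsing in the middle is elided):

static int example_scan_cis(struct pcmcia_device *link)
{
	tuple_t tuple;
	cisparse_t parse;
	unsigned char buf[64];
	int last_fn, last_ret;

	tuple.Attributes = 0;
	tuple.TupleData = buf;
	tuple.TupleDataMax = sizeof(buf);
	tuple.TupleOffset = 0;

	/* 1: the CONFIG tuple locates the card's configuration registers */
	tuple.DesiredTuple = CISTPL_CONFIG;
	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
	link->conf.ConfigBase = parse.config.base;
	link->conf.Present = parse.config.rmask[0];

	/* 2: walk the CFTABLE_ENTRY tuples until an I/O window sticks */
	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
	while (1) {
		if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
		    pcmcia_parse_tuple(link, &tuple, &parse) != 0)
			goto next_entry;
		/* ... fill link->io from parse.cftable_entry (elided) ... */
		if (pcmcia_request_io(link, &link->io) == 0)
			break;			/* reserved a usable window */
next_entry:
		CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
	}
	return 0;

cs_failed:
	cs_error(link, last_fn, last_ret);
	return -ENODEV;
}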
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 1f4ad0e7836e6..cba6c9eef28e5 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -263,6 +263,13 @@ config OMAP_CF
Say Y here to support the CompactFlash controller on OMAP.
Note that this doesn't support "True IDE" mode.
+config AT91_CF
+ tristate "AT91 CompactFlash Controller"
+ depends on PCMCIA && ARCH_AT91RM9200
+ help
+ Say Y here to support the CompactFlash controller on AT91 chips.
+ Or choose M to compile the driver as a module named "at91_cf".
+
config PCCARD_NONSTATIC
tristate
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index bcecf5133b7ef..4276965517f21 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -10,7 +10,7 @@ pcmcia_core-y += cs.o cistpl.o rsrc_mgr.o socket_sysfs.o
pcmcia_core-$(CONFIG_CARDBUS) += cardbus.o
obj-$(CONFIG_PCCARD) += pcmcia_core.o
-pcmcia-y += ds.o pcmcia_compat.o pcmcia_resource.o
+pcmcia-y += ds.o pcmcia_resource.o
pcmcia-$(CONFIG_PCMCIA_IOCTL) += pcmcia_ioctl.o
obj-$(CONFIG_PCMCIA) += pcmcia.o
@@ -36,6 +36,7 @@ obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o
obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o
obj-$(CONFIG_OMAP_CF) += omap_cf.o
+obj-$(CONFIG_AT91_CF) += at91_cf.o
sa11xx_core-y += soc_common.o sa11xx_base.o
pxa2xx_core-y += soc_common.o pxa2xx_base.o
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
new file mode 100644
index 0000000000000..67cc5f7d0c906
--- /dev/null
+++ b/drivers/pcmcia/at91_cf.c
@@ -0,0 +1,365 @@
+/*
+ * at91_cf.c -- AT91 CompactFlash controller driver
+ *
+ * Copyright (C) 2005 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <pcmcia/ss.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/sizes.h>
+
+#include <asm/arch/at91rm9200.h>
+#include <asm/arch/board.h>
+#include <asm/arch/gpio.h>
+
+
+#define CF_SIZE 0x30000000 /* CS5+CS6: unavailable */
+
+/*
+ * A0..A10 work in each range; A23 indicates I/O space; A25 is CFRNW;
+ * some other bit in {A24,A22..A11} is nREG to flag memory access
+ * (vs attributes). So more than 2KB/region would just be waste.
+ */
+#define CF_ATTR_PHYS (AT91_CF_BASE)
+#define CF_IO_PHYS (AT91_CF_BASE + (1 << 23))
+#define CF_MEM_PHYS (AT91_CF_BASE + 0x017ff800)
+
+/*--------------------------------------------------------------------------*/
+
+static const char driver_name[] = "at91_cf";
+
+struct at91_cf_socket {
+ struct pcmcia_socket socket;
+
+ unsigned present:1;
+
+ struct platform_device *pdev;
+ struct at91_cf_data *board;
+};
+
+#define SZ_2K (2 * SZ_1K)
+
+static inline int at91_cf_present(struct at91_cf_socket *cf)
+{
+ return !at91_get_gpio_value(cf->board->det_pin);
+}
+
+/*--------------------------------------------------------------------------*/
+
+static int at91_cf_ss_init(struct pcmcia_socket *s)
+{
+ return 0;
+}
+
+static irqreturn_t at91_cf_irq(int irq, void *_cf, struct pt_regs *r)
+{
+ struct at91_cf_socket *cf = (struct at91_cf_socket *) _cf;
+
+ if (irq == cf->board->det_pin) {
+ unsigned present = at91_cf_present(cf);
+
+ /* kick pccard as needed */
+ if (present != cf->present) {
+ cf->present = present;
+ pr_debug("%s: card %s\n", driver_name, present ? "present" : "gone");
+ pcmcia_parse_events(&cf->socket, SS_DETECT);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp)
+{
+ struct at91_cf_socket *cf;
+
+ if (!sp)
+ return -EINVAL;
+
+ cf = container_of(s, struct at91_cf_socket, socket);
+
+ /* NOTE: we assume 3VCARD, not XVCARD... */
+ if (at91_cf_present(cf)) {
+ int rdy = cf->board->irq_pin; /* RDY/nIRQ */
+ int vcc = cf->board->vcc_pin;
+
+ *sp = SS_DETECT | SS_3VCARD;
+ if (!rdy || at91_get_gpio_value(rdy))
+ *sp |= SS_READY;
+ if (!vcc || at91_get_gpio_value(vcc))
+ *sp |= SS_POWERON;
+ } else
+ *sp = 0;
+
+ return 0;
+}
+
+static int at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
+{
+ struct at91_cf_socket *cf;
+
+ cf = container_of(sock, struct at91_cf_socket, socket);
+
+ /* switch Vcc if needed and possible */
+ if (cf->board->vcc_pin) {
+ switch (s->Vcc) {
+ case 0:
+ at91_set_gpio_value(cf->board->vcc_pin, 0);
+ break;
+ case 33:
+ at91_set_gpio_value(cf->board->vcc_pin, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* toggle reset if needed */
+ at91_set_gpio_value(cf->board->rst_pin, s->flags & SS_RESET);
+
+ pr_debug("%s: Vcc %d, io_irq %d, flags %04x csc %04x\n",
+ driver_name, s->Vcc, s->io_irq, s->flags, s->csc_mask);
+
+ return 0;
+}
+
+static int at91_cf_ss_suspend(struct pcmcia_socket *s)
+{
+ return at91_cf_set_socket(s, &dead_socket);
+}
+
+/* we already mapped the I/O region */
+static int at91_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
+{
+ struct at91_cf_socket *cf;
+ u32 csr;
+
+ cf = container_of(s, struct at91_cf_socket, socket);
+ io->flags &= (MAP_ACTIVE | MAP_16BIT | MAP_AUTOSZ);
+
+ /*
+ * Use 16 bit accesses unless/until we need 8-bit i/o space.
+ * Always set CSR4 ... PCMCIA won't always unmap things.
+ */
+ csr = at91_sys_read(AT91_SMC_CSR(4)) & ~AT91_SMC_DBW;
+
+ /*
+ * NOTE: this CF controller ignores IOIS16, so we can't really do
+ * MAP_AUTOSZ. The 16bit mode allows single byte access on either
+ * D0-D7 (even addr) or D8-D15 (odd), so it's close enough for many
+ * purposes (and handles ide-cs).
+ *
+ * The 8bit mode is needed for odd byte access on D0-D7. It seems
+ * some cards only like that way to get at the odd byte, despite
+ * CF 3.0 spec table 35 also giving the D8-D15 option.
+ */
+ if (!(io->flags & (MAP_16BIT|MAP_AUTOSZ))) {
+ csr |= AT91_SMC_DBW_8;
+ pr_debug("%s: 8bit i/o bus\n", driver_name);
+ } else {
+ csr |= AT91_SMC_DBW_16;
+ pr_debug("%s: 16bit i/o bus\n", driver_name);
+ }
+ at91_sys_write(AT91_SMC_CSR(4), csr);
+
+ io->start = cf->socket.io_offset;
+ io->stop = io->start + SZ_2K - 1;
+
+ return 0;
+}
+
+/* pcmcia layer maps/unmaps mem regions */
+static int at91_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map)
+{
+ struct at91_cf_socket *cf;
+
+ if (map->card_start)
+ return -EINVAL;
+
+ cf = container_of(s, struct at91_cf_socket, socket);
+
+ map->flags &= MAP_ACTIVE|MAP_ATTRIB|MAP_16BIT;
+ if (map->flags & MAP_ATTRIB)
+ map->static_start = CF_ATTR_PHYS;
+ else
+ map->static_start = CF_MEM_PHYS;
+
+ return 0;
+}
+
+static struct pccard_operations at91_cf_ops = {
+ .init = at91_cf_ss_init,
+ .suspend = at91_cf_ss_suspend,
+ .get_status = at91_cf_get_status,
+ .set_socket = at91_cf_set_socket,
+ .set_io_map = at91_cf_set_io_map,
+ .set_mem_map = at91_cf_set_mem_map,
+};
+
+/*--------------------------------------------------------------------------*/
+
+static int __init at91_cf_probe(struct device *dev)
+{
+ struct at91_cf_socket *cf;
+ struct at91_cf_data *board = dev->platform_data;
+ struct platform_device *pdev = to_platform_device(dev);
+ unsigned int csa;
+ int status;
+
+ if (!board || !board->det_pin || !board->rst_pin)
+ return -ENODEV;
+
+ cf = kcalloc(1, sizeof *cf, GFP_KERNEL);
+ if (!cf)
+ return -ENOMEM;
+
+ cf->board = board;
+ cf->pdev = pdev;
+ dev_set_drvdata(dev, cf);
+
+ /* CF takes over CS4, CS5, CS6 */
+ csa = at91_sys_read(AT91_EBI_CSA);
+ at91_sys_write(AT91_EBI_CSA, csa | AT91_EBI_CS4A_SMC_COMPACTFLASH);
+
+ /* force poweron defaults for these pins ... */
+ (void) at91_set_A_periph(AT91_PIN_PC9, 0); /* A25/CFRNW */
+ (void) at91_set_A_periph(AT91_PIN_PC10, 0); /* NCS4/CFCS */
+ (void) at91_set_A_periph(AT91_PIN_PC11, 0); /* NCS5/CFCE1 */
+ (void) at91_set_A_periph(AT91_PIN_PC12, 0); /* NCS6/CFCE2 */
+
+ /* nWAIT is _not_ a default setting */
+ (void) at91_set_A_periph(AT91_PIN_PC6, 1); /* nWAIT */
+
+ /*
+ * Static memory controller timing adjustments.
+ * REVISIT: these timings are in terms of MCK cycles, so
+ * when MCK changes (cpufreq etc) so must these values...
+ */
+ at91_sys_write(AT91_SMC_CSR(4), AT91_SMC_ACSS_STD | AT91_SMC_DBW_16 | AT91_SMC_BAT | AT91_SMC_WSEN
+ | AT91_SMC_NWS_(32) /* wait states */
+ | AT91_SMC_RWSETUP_(6) /* setup time */
+ | AT91_SMC_RWHOLD_(4) /* hold time */
+ );
+
+ /* must be a GPIO; ergo must trigger on both edges */
+ status = request_irq(board->det_pin, at91_cf_irq,
+ SA_SAMPLE_RANDOM, driver_name, cf);
+ if (status < 0)
+ goto fail0;
+
+ /*
+ * The card driver will request this irq later as needed,
+ * but it causes lots of "irqNN: nobody cared" messages
+ * unless we report that we handle everything (sigh).
+ * (Note: DK board doesn't wire the IRQ pin...)
+ */
+ if (board->irq_pin) {
+ status = request_irq(board->irq_pin, at91_cf_irq,
+ SA_SHIRQ, driver_name, cf);
+ if (status < 0)
+ goto fail0a;
+ cf->socket.pci_irq = board->irq_pin;
+ }
+ else
+ cf->socket.pci_irq = NR_IRQS + 1;
+
+ /* pcmcia layer only remaps "real" memory not iospace */
+ cf->socket.io_offset = (unsigned long) ioremap(CF_IO_PHYS, SZ_2K);
+ if (!cf->socket.io_offset)
+ goto fail1;
+
+ /* reserve CS4, CS5, and CS6 regions; but use just CS4 */
+ if (!request_mem_region(AT91_CF_BASE, CF_SIZE, driver_name))
+ goto fail1;
+
+ pr_info("%s: irqs det #%d, io #%d\n", driver_name,
+ board->det_pin, board->irq_pin);
+
+ cf->socket.owner = THIS_MODULE;
+ cf->socket.dev.dev = dev;
+ cf->socket.ops = &at91_cf_ops;
+ cf->socket.resource_ops = &pccard_static_ops;
+ cf->socket.features = SS_CAP_PCCARD | SS_CAP_STATIC_MAP
+ | SS_CAP_MEM_ALIGN;
+ cf->socket.map_size = SZ_2K;
+ cf->socket.io[0].NumPorts = SZ_2K;
+
+ status = pcmcia_register_socket(&cf->socket);
+ if (status < 0)
+ goto fail2;
+
+ return 0;
+
+fail2:
+ iounmap((void __iomem *) cf->socket.io_offset);
+ release_mem_region(AT91_CF_BASE, CF_SIZE);
+fail1:
+ if (board->irq_pin)
+ free_irq(board->irq_pin, cf);
+fail0a:
+ free_irq(board->det_pin, cf);
+fail0:
+ at91_sys_write(AT91_EBI_CSA, csa);
+ kfree(cf);
+ return status;
+}
+
+static int __exit at91_cf_remove(struct device *dev)
+{
+ struct at91_cf_socket *cf = dev_get_drvdata(dev);
+ unsigned int csa;
+
+ pcmcia_unregister_socket(&cf->socket);
+ free_irq(cf->board->irq_pin, cf);
+ free_irq(cf->board->det_pin, cf);
+ iounmap((void __iomem *) cf->socket.io_offset);
+ release_mem_region(AT91_CF_BASE, CF_SIZE);
+
+ csa = at91_sys_read(AT91_EBI_CSA);
+ at91_sys_write(AT91_EBI_CSA, csa & ~AT91_EBI_CS4A);
+
+ kfree(cf);
+ return 0;
+}
+
+static struct device_driver at91_cf_driver = {
+ .name = (char *) driver_name,
+ .bus = &platform_bus_type,
+ .probe = at91_cf_probe,
+ .remove = __exit_p(at91_cf_remove),
+ .suspend = pcmcia_socket_dev_suspend,
+ .resume = pcmcia_socket_dev_resume,
+};
+
+/*--------------------------------------------------------------------------*/
+
+static int __init at91_cf_init(void)
+{
+ return driver_register(&at91_cf_driver);
+}
+module_init(at91_cf_init);
+
+static void __exit at91_cf_exit(void)
+{
+ driver_unregister(&at91_cf_driver);
+}
+module_exit(at91_cf_exit);
+
+MODULE_DESCRIPTION("AT91 Compact Flash Driver");
+MODULE_AUTHOR("David Brownell");
+MODULE_LICENSE("GPL");
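at91_cf.c gets its GPIO wiring from platform data: a struct at91_cf_data (declared in asm/arch/board.h) with det_pin and rst_pin required, irq_pin and vcc_pin optional, reached through dev->platform_data. A hedged sketch of what a board file might register; the pin numbers are placeholders only, and real AT91 board code may fill further fields or use a registration helper:

#include <linux/platform_device.h>
#include <asm/arch/board.h>
#include <asm/arch/gpio.h>

/* board-file sketch only: pin assignments are not taken from any real board */
static struct at91_cf_data example_cf_data = {
	.det_pin = AT91_PIN_PB0,	/* card detect GPIO (required) */
	.rst_pin = AT91_PIN_PB1,	/* reset GPIO (required) */
	.irq_pin = AT91_PIN_PB2,	/* RDY/nIRQ GPIO, 0 if not wired */
	.vcc_pin = 0,			/* 0: no software Vcc switching */
};

static struct platform_device example_cf_device = {
	.name = "at91_cf",		/* matches driver_name above */
	.id   = -1,
	.dev  = {
		.platform_data = &example_cf_data,
	},
};

/* registered from the board's init code: platform_device_register(&example_cf_device); */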
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 120fa8da63924..912c03e5eb0a3 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -12,7 +12,6 @@
* (C) 1999 David A. Hinds
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 613f2f1fbfddb..3162998579c17 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -16,7 +16,6 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/config.h>
#include <linux/string.h>
#include <linux/major.h>
#include <linux/errno.h>
@@ -111,9 +110,9 @@ int pcmcia_socket_dev_suspend(struct device *dev, pm_message_t state)
list_for_each_entry(socket, &pcmcia_socket_list, socket_list) {
if (socket->dev.dev != dev)
continue;
- down(&socket->skt_sem);
+ mutex_lock(&socket->skt_mutex);
socket_suspend(socket);
- up(&socket->skt_sem);
+ mutex_unlock(&socket->skt_mutex);
}
up_read(&pcmcia_socket_list_rwsem);
@@ -129,9 +128,9 @@ int pcmcia_socket_dev_resume(struct device *dev)
list_for_each_entry(socket, &pcmcia_socket_list, socket_list) {
if (socket->dev.dev != dev)
continue;
- down(&socket->skt_sem);
+ mutex_lock(&socket->skt_mutex);
socket_resume(socket);
- up(&socket->skt_sem);
+ mutex_unlock(&socket->skt_mutex);
}
up_read(&pcmcia_socket_list_rwsem);
@@ -237,7 +236,7 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
init_completion(&socket->socket_released);
init_completion(&socket->thread_done);
init_waitqueue_head(&socket->thread_wait);
- init_MUTEX(&socket->skt_sem);
+ mutex_init(&socket->skt_mutex);
spin_lock_init(&socket->thread_lock);
ret = kernel_thread(pccardd, socket, CLONE_KERNEL);
@@ -406,8 +405,6 @@ static void socket_shutdown(struct pcmcia_socket *s)
cb_free(s);
#endif
s->functions = 0;
- kfree(s->config);
- s->config = NULL;
s->ops->get_status(s, &status);
if (status & SS_POWERON) {
@@ -664,7 +661,7 @@ static int pccardd(void *__skt)
spin_unlock_irqrestore(&skt->thread_lock, flags);
if (events) {
- down(&skt->skt_sem);
+ mutex_lock(&skt->skt_mutex);
if (events & SS_DETECT)
socket_detect_change(skt);
if (events & SS_BATDEAD)
@@ -673,7 +670,7 @@ static int pccardd(void *__skt)
send_event(skt, CS_EVENT_BATTERY_LOW, CS_EVENT_PRI_LOW);
if (events & SS_READY)
send_event(skt, CS_EVENT_READY_CHANGE, CS_EVENT_PRI_LOW);
- up(&skt->skt_sem);
+ mutex_unlock(&skt->skt_mutex);
continue;
}
@@ -717,8 +714,8 @@ int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c)
{
int ret = 0;
- /* s->skt_sem also protects s->callback */
- down(&s->skt_sem);
+ /* s->skt_mutex also protects s->callback */
+ mutex_lock(&s->skt_mutex);
if (c) {
/* registration */
@@ -734,7 +731,7 @@ int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c)
} else
s->callback = NULL;
err:
- up(&s->skt_sem);
+ mutex_unlock(&s->skt_mutex);
return ret;
}
@@ -752,7 +749,7 @@ int pccard_reset_card(struct pcmcia_socket *skt)
cs_dbg(skt, 1, "resetting socket\n");
- down(&skt->skt_sem);
+ mutex_lock(&skt->skt_mutex);
do {
if (!(skt->state & SOCKET_PRESENT)) {
ret = CS_NO_CARD;
@@ -781,7 +778,7 @@ int pccard_reset_card(struct pcmcia_socket *skt)
ret = CS_SUCCESS;
} while (0);
- up(&skt->skt_sem);
+ mutex_unlock(&skt->skt_mutex);
return ret;
} /* reset_card */
@@ -797,7 +794,7 @@ int pcmcia_suspend_card(struct pcmcia_socket *skt)
cs_dbg(skt, 1, "suspending socket\n");
- down(&skt->skt_sem);
+ mutex_lock(&skt->skt_mutex);
do {
if (!(skt->state & SOCKET_PRESENT)) {
ret = CS_NO_CARD;
@@ -814,7 +811,7 @@ int pcmcia_suspend_card(struct pcmcia_socket *skt)
}
ret = socket_suspend(skt);
} while (0);
- up(&skt->skt_sem);
+ mutex_unlock(&skt->skt_mutex);
return ret;
} /* suspend_card */
@@ -827,7 +824,7 @@ int pcmcia_resume_card(struct pcmcia_socket *skt)
cs_dbg(skt, 1, "waking up socket\n");
- down(&skt->skt_sem);
+ mutex_lock(&skt->skt_mutex);
do {
if (!(skt->state & SOCKET_PRESENT)) {
ret = CS_NO_CARD;
@@ -841,7 +838,7 @@ int pcmcia_resume_card(struct pcmcia_socket *skt)
if (!ret && skt->callback)
skt->callback->resume(skt);
} while (0);
- up(&skt->skt_sem);
+ mutex_unlock(&skt->skt_mutex);
return ret;
} /* resume_card */
@@ -855,7 +852,7 @@ int pcmcia_eject_card(struct pcmcia_socket *skt)
cs_dbg(skt, 1, "user eject request\n");
- down(&skt->skt_sem);
+ mutex_lock(&skt->skt_mutex);
do {
if (!(skt->state & SOCKET_PRESENT)) {
ret = -ENODEV;
@@ -871,7 +868,7 @@ int pcmcia_eject_card(struct pcmcia_socket *skt)
socket_remove(skt);
ret = 0;
} while (0);
- up(&skt->skt_sem);
+ mutex_unlock(&skt->skt_mutex);
return ret;
} /* eject_card */
@@ -884,7 +881,7 @@ int pcmcia_insert_card(struct pcmcia_socket *skt)
cs_dbg(skt, 1, "user insert request\n");
- down(&skt->skt_sem);
+ mutex_lock(&skt->skt_mutex);
do {
if (skt->state & SOCKET_PRESENT) {
ret = -EBUSY;
@@ -896,7 +893,7 @@ int pcmcia_insert_card(struct pcmcia_socket *skt)
}
ret = 0;
} while (0);
- up(&skt->skt_sem);
+ mutex_unlock(&skt->skt_mutex);
return ret;
} /* insert_card */
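The cs.c changes are a mechanical conversion of the per-socket lock from a semaphore to a mutex: skt_sem becomes skt_mutex, init_MUTEX() becomes mutex_init(), and every down()/up() pair becomes mutex_lock()/mutex_unlock(). Reduced to a single caller (hypothetical name):

#include <linux/mutex.h>

static int example_socket_op(struct pcmcia_socket *skt)
{
	int ret = CS_SUCCESS;

	mutex_lock(&skt->skt_mutex);		/* was: down(&skt->skt_sem) */
	if (!(skt->state & SOCKET_PRESENT))
		ret = CS_NO_CARD;
	else {
		/* ... operate on the socket while holding the mutex ... */
	}
	mutex_unlock(&skt->skt_mutex);		/* was: up(&skt->skt_sem) */

	return ret;
}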
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 7b37eba35bf13..d6164cd583fd1 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -15,7 +15,7 @@
#ifndef _LINUX_CS_INTERNAL_H
#define _LINUX_CS_INTERNAL_H
-#include <linux/config.h>
+#include <linux/kref.h>
/* Flags in client state */
#define CLIENT_CONFIG_LOCKED 0x0001
@@ -23,7 +23,7 @@
#define CLIENT_IO_REQ 0x0004
#define CLIENT_UNBOUND 0x0008
#define CLIENT_STALE 0x0010
-#define CLIENT_WIN_REQ(i) (0x20<<(i))
+#define CLIENT_WIN_REQ(i) (0x1<<(i))
#define CLIENT_CARDBUS 0x8000
#define REGION_MAGIC 0xE3C9
@@ -31,7 +31,7 @@ typedef struct region_t {
u_short region_magic;
u_short state;
dev_info_t dev_info;
- client_handle_t mtd;
+ struct pcmcia_device *mtd;
u_int MediaID;
region_info_t info;
} region_t;
@@ -40,12 +40,12 @@ typedef struct region_t {
/* Each card function gets one of these guys */
typedef struct config_t {
+ struct kref ref;
u_int state;
u_int Attributes;
u_int IntType;
u_int ConfigBase;
u_char Status, Pin, Copy, Option, ExtStatus;
- u_int Present;
u_int CardValues;
io_req_t io;
struct {
@@ -95,12 +95,6 @@ static inline void cs_socket_put(struct pcmcia_socket *skt)
}
}
-#define CHECK_SOCKET(s) \
- (((s) >= sockets) || (socket_table[s]->ops == NULL))
-
-#define SOCKET(h) (h->socket)
-#define CONFIG(h) (&SOCKET(h)->config[(h)->func])
-
/* In cardbus.c */
int cb_alloc(struct pcmcia_socket *s);
void cb_free(struct pcmcia_socket *s);
@@ -133,10 +127,9 @@ extern struct class_interface pccard_sysfs_interface;
extern struct rw_semaphore pcmcia_socket_list_rwsem;
extern struct list_head pcmcia_socket_list;
int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *handle, int idx, win_req_t *req);
-int pccard_get_configuration_info(struct pcmcia_socket *s, unsigned int function, config_info_t *config);
+int pccard_get_configuration_info(struct pcmcia_socket *s, struct pcmcia_device *p_dev, config_info_t *config);
int pccard_reset_card(struct pcmcia_socket *skt);
-int pccard_get_status(struct pcmcia_socket *s, unsigned int function, cs_status_t *status);
-int pccard_access_configuration_register(struct pcmcia_socket *s, unsigned int function, conf_reg_t *reg);
+int pccard_get_status(struct pcmcia_socket *s, struct pcmcia_device *p_dev, cs_status_t *status);
struct pcmcia_callback{
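
In cs_internal.h, config_t gains a struct kref so the per-function configuration state can be shared and reference-counted between pcmcia_device structures that use the same function_config (see pcmcia_device_add() below), the duplicate Present field is dropped in favour of CardValues, and CLIENT_WIN_REQ(i) shifts from (0x20<<(i)) to (0x1<<(i)) because window-request flags now live in their own per-device bitfield, p_dev->_win, rather than in the old packed client state word. A hedged sketch of how those window bits are used; set_window_bit() and window_is_requested() are invented here purely for illustration:

	#include <pcmcia/ds.h>		/* struct pcmcia_device, with the new _win field */

	#define CLIENT_WIN_REQ(i)	(0x1 << (i))	/* bits 0..MAX_WIN-1 of p_dev->_win */

	static void set_window_bit(struct pcmcia_device *p_dev, int w)
	{
		p_dev->_win |= CLIENT_WIN_REQ(w);	/* done in pcmcia_request_window() */
	}

	static int window_is_requested(struct pcmcia_device *p_dev, int w)
	{
		return p_dev->_win & CLIENT_WIN_REQ(w);	/* checked on driver removal */
	}
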
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index bb96ce1db08c3..ae10d1eed65e2 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -10,10 +10,9 @@
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
*
* (C) 1999 David A. Hinds
- * (C) 2003 - 2005 Dominik Brodowski
+ * (C) 2003 - 2006 Dominik Brodowski
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -23,6 +22,7 @@
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
+#include <linux/kref.h>
#define IN_CARD_SERVICES
#include <pcmcia/cs_types.h>
@@ -343,12 +343,19 @@ void pcmcia_put_dev(struct pcmcia_device *p_dev)
put_device(&p_dev->dev);
}
+static void pcmcia_release_function(struct kref *ref)
+{
+ struct config_t *c = container_of(ref, struct config_t, ref);
+ kfree(c);
+}
+
static void pcmcia_release_dev(struct device *dev)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
ds_dbg(1, "releasing dev %p\n", p_dev);
pcmcia_put_socket(p_dev->socket);
kfree(p_dev->devname);
+ kref_put(&p_dev->function_config->ref, pcmcia_release_function);
kfree(p_dev);
}
@@ -377,29 +384,12 @@ static int pcmcia_device_probe(struct device * dev)
p_drv = to_pcmcia_drv(dev->driver);
s = p_dev->socket;
- if ((!p_drv->probe) || (!try_module_get(p_drv->owner))) {
+ if ((!p_drv->probe) || (!p_dev->function_config) ||
+ (!try_module_get(p_drv->owner))) {
ret = -EINVAL;
goto put_dev;
}
- p_dev->state &= ~CLIENT_UNBOUND;
-
- /* set up the device configuration, if it hasn't been done before */
- if (!s->functions) {
- cistpl_longlink_mfc_t mfc;
- if (pccard_read_tuple(s, p_dev->func, CISTPL_LONGLINK_MFC,
- &mfc) == CS_SUCCESS)
- s->functions = mfc.nfn;
- else
- s->functions = 1;
- s->config = kzalloc(sizeof(config_t) * s->functions,
- GFP_KERNEL);
- if (!s->config) {
- ret = -ENOMEM;
- goto put_module;
- }
- }
-
ret = p_drv->probe(p_dev);
if (ret)
goto put_module;
@@ -425,15 +415,61 @@ static int pcmcia_device_probe(struct device * dev)
}
+/*
+ * Removes a PCMCIA card from the device tree and socket list.
+ */
+static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *leftover)
+{
+ struct pcmcia_device *p_dev;
+ struct pcmcia_device *tmp;
+ unsigned long flags;
+
+ ds_dbg(2, "unbind_request(%d)\n", s->sock);
+
+
+ if (!leftover)
+ s->device_count = 0;
+ else
+ s->device_count = 1;
+
+ /* unregister all pcmcia_devices registered with this socket, except leftover */
+ list_for_each_entry_safe(p_dev, tmp, &s->devices_list, socket_device_list) {
+ if (p_dev == leftover)
+ continue;
+
+ spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
+ list_del(&p_dev->socket_device_list);
+ p_dev->_removed=1;
+ spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
+
+ device_unregister(&p_dev->dev);
+ }
+
+ return;
+}
+
+
static int pcmcia_device_remove(struct device * dev)
{
struct pcmcia_device *p_dev;
struct pcmcia_driver *p_drv;
+ struct pcmcia_device_id *did;
int i;
- /* detach the "instance" */
p_dev = to_pcmcia_dev(dev);
p_drv = to_pcmcia_drv(dev->driver);
+
+ /* If we're removing the primary module driving a
+ * pseudo multi-function card, we need to unbind
+ * all devices
+ */
+ did = (struct pcmcia_device_id *) p_dev->dev.driver_data;
+ if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
+ (p_dev->socket->device_count != 0) &&
+ (p_dev->device_no == 0))
+ pcmcia_card_remove(p_dev->socket, p_dev);
+
+ /* detach the "instance" */
if (!p_drv)
return 0;
@@ -441,17 +477,16 @@ static int pcmcia_device_remove(struct device * dev)
p_drv->remove(p_dev);
/* check for proper unloading */
- if (p_dev->state & (CLIENT_IRQ_REQ|CLIENT_IO_REQ|CLIENT_CONFIG_LOCKED))
+ if (p_dev->_irq || p_dev->_io || p_dev->_locked)
printk(KERN_INFO "pcmcia: driver %s did not release config properly\n",
p_drv->drv.name);
for (i = 0; i < MAX_WIN; i++)
- if (p_dev->state & CLIENT_WIN_REQ(i))
+ if (p_dev->_win & CLIENT_WIN_REQ(i))
printk(KERN_INFO "pcmcia: driver %s did not release windows properly\n",
p_drv->drv.name);
/* references from pcmcia_probe_device */
- p_dev->state = CLIENT_UNBOUND;
pcmcia_put_dev(p_dev);
module_put(p_drv->owner);
@@ -460,37 +495,6 @@ static int pcmcia_device_remove(struct device * dev)
/*
- * Removes a PCMCIA card from the device tree and socket list.
- */
-static void pcmcia_card_remove(struct pcmcia_socket *s)
-{
- struct pcmcia_device *p_dev;
- unsigned long flags;
-
- ds_dbg(2, "unbind_request(%d)\n", s->sock);
-
- s->device_count = 0;
-
- for (;;) {
- /* unregister all pcmcia_devices registered with this socket*/
- spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
- if (list_empty(&s->devices_list)) {
- spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
- return;
- }
- p_dev = list_entry((&s->devices_list)->next, struct pcmcia_device, socket_device_list);
- list_del(&p_dev->socket_device_list);
- p_dev->state |= CLIENT_STALE;
- spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
-
- device_unregister(&p_dev->dev);
- }
-
- return;
-} /* unbind_request */
-
-
-/*
* pcmcia_device_query -- determine information about a pcmcia device
*/
static int pcmcia_device_query(struct pcmcia_device *p_dev)
@@ -546,7 +550,7 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
tmp = vers1->str + vers1->ofs[i];
length = strlen(tmp) + 1;
- if ((length < 3) || (length > 255))
+ if ((length < 2) || (length > 255))
continue;
p_dev->prod_id[i] = kmalloc(sizeof(char) * length,
@@ -571,11 +575,11 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
* won't work, this doesn't matter much at the moment: the driver core doesn't
* support it either.
*/
-static DECLARE_MUTEX(device_add_lock);
+static DEFINE_MUTEX(device_add_lock);
struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int function)
{
- struct pcmcia_device *p_dev;
+ struct pcmcia_device *p_dev, *tmp_dev;
unsigned long flags;
int bus_id_len;
@@ -583,7 +587,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
if (!s)
return NULL;
- down(&device_add_lock);
+ mutex_lock(&device_add_lock);
/* max of 2 devices per card */
if (s->device_count == 2)
@@ -596,6 +600,8 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
p_dev->socket = s;
p_dev->device_no = (s->device_count++);
p_dev->func = function;
+ if (s->functions <= function)
+ s->functions = function + 1;
p_dev->dev.bus = &pcmcia_bus_type;
p_dev->dev.parent = s->dev.dev;
@@ -608,36 +614,55 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
sprintf (p_dev->devname, "pcmcia%s", p_dev->dev.bus_id);
/* compat */
- p_dev->state = CLIENT_UNBOUND;
+ spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
+
+ /*
+ * p_dev->function_config must be the same for all card functions.
+ * Note that this is serialized by the device_add_lock, so that
+ * only one such struct will be created.
+ */
+ list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
+ if (p_dev->func == tmp_dev->func) {
+ p_dev->function_config = tmp_dev->function_config;
+ kref_get(&p_dev->function_config->ref);
+ }
/* Add to the list in pcmcia_bus_socket */
- spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
list_add_tail(&p_dev->socket_device_list, &s->devices_list);
+
spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
+ if (!p_dev->function_config) {
+ p_dev->function_config = kzalloc(sizeof(struct config_t),
+ GFP_KERNEL);
+ if (!p_dev->function_config)
+ goto err_unreg;
+ kref_init(&p_dev->function_config->ref);
+ }
+
printk(KERN_NOTICE "pcmcia: registering new device %s\n",
p_dev->devname);
pcmcia_device_query(p_dev);
- if (device_register(&p_dev->dev)) {
- spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
- list_del(&p_dev->socket_device_list);
- spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
-
- goto err_free;
- }
+ if (device_register(&p_dev->dev))
+ goto err_unreg;
- up(&device_add_lock);
+ mutex_unlock(&device_add_lock);
return p_dev;
+ err_unreg:
+ spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
+ list_del(&p_dev->socket_device_list);
+ spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
+
err_free:
kfree(p_dev->devname);
kfree(p_dev);
s->device_count--;
err_put:
- up(&device_add_lock);
+ mutex_unlock(&device_add_lock);
pcmcia_put_socket(s);
return NULL;
@@ -696,7 +721,7 @@ static void pcmcia_bus_rescan(struct pcmcia_socket *skt)
int no_devices=0;
unsigned long flags;
- /* must be called with skt_sem held */
+ /* must be called with skt_mutex held */
spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
if (list_empty(&skt->devices_list))
no_devices=1;
@@ -819,9 +844,11 @@ static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) {
struct pcmcia_driver * p_drv = to_pcmcia_drv(drv);
struct pcmcia_device_id *did = p_drv->id_table;
+#ifdef CONFIG_PCMCIA_IOCTL
/* matching by cardmgr */
if (p_dev->cardmgr == p_drv)
return 1;
+#endif
while (did && did->match_flags) {
if (pcmcia_devmatch(p_dev, did))
@@ -927,7 +954,7 @@ static ssize_t pcmcia_show_pm_state(struct device *dev, struct device_attribute
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
- if (p_dev->dev.power.power_state.event != PM_EVENT_ON)
+ if (p_dev->suspended)
return sprintf(buf, "off\n");
else
return sprintf(buf, "on\n");
@@ -942,11 +969,9 @@ static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute
if (!count)
return -EINVAL;
- if ((p_dev->dev.power.power_state.event == PM_EVENT_ON) &&
- (!strncmp(buf, "off", 3)))
+ if ((!p_dev->suspended) && !strncmp(buf, "off", 3))
ret = dpm_runtime_suspend(dev, PMSG_SUSPEND);
- else if ((p_dev->dev.power.power_state.event != PM_EVENT_ON) &&
- (!strncmp(buf, "on", 2)))
+ else if (p_dev->suspended && !strncmp(buf, "on", 2))
dpm_runtime_resume(dev);
return ret ? ret : count;
@@ -982,9 +1007,9 @@ static ssize_t pcmcia_store_allow_func_id_match(struct device *dev,
if (!count)
return -EINVAL;
- down(&p_dev->socket->skt_sem);
+ mutex_lock(&p_dev->socket->skt_mutex);
p_dev->allow_func_id_match = 1;
- up(&p_dev->socket->skt_sem);
+ mutex_unlock(&p_dev->socket->skt_mutex);
bus_rescan_devices(&pcmcia_bus_type);
@@ -1012,14 +1037,27 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
struct pcmcia_driver *p_drv = NULL;
+ int ret = 0;
if (dev->driver)
p_drv = to_pcmcia_drv(dev->driver);
- if (p_drv && p_drv->suspend)
- return p_drv->suspend(p_dev);
+ if (!p_drv)
+ goto out;
- return 0;
+ if (p_drv->suspend) {
+ ret = p_drv->suspend(p_dev);
+ if (ret)
+ goto out;
+ }
+
+ if (p_dev->device_no == p_dev->func)
+ pcmcia_release_configuration(p_dev);
+
+ out:
+ if (!ret)
+ p_dev->suspended = 1;
+ return ret;
}
@@ -1027,14 +1065,27 @@ static int pcmcia_dev_resume(struct device * dev)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
struct pcmcia_driver *p_drv = NULL;
+ int ret = 0;
if (dev->driver)
p_drv = to_pcmcia_drv(dev->driver);
- if (p_drv && p_drv->resume)
- return p_drv->resume(p_dev);
+ if (!p_drv)
+ goto out;
- return 0;
+ if (p_dev->device_no == p_dev->func) {
+ ret = pcmcia_request_configuration(p_dev, &p_dev->conf);
+ if (ret)
+ goto out;
+ }
+
+ if (p_drv->resume)
+ ret = p_drv->resume(p_dev);
+
+ out:
+ if (!ret)
+ p_dev->suspended = 0;
+ return ret;
}
@@ -1100,7 +1151,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
switch (event) {
case CS_EVENT_CARD_REMOVAL:
s->pcmcia_state.present = 0;
- pcmcia_card_remove(skt);
+ pcmcia_card_remove(skt, NULL);
handle_event(skt, event);
break;
@@ -1128,6 +1179,32 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
} /* ds_event */
+struct pcmcia_device * pcmcia_dev_present(struct pcmcia_device *_p_dev)
+{
+ struct pcmcia_device *p_dev;
+ struct pcmcia_device *ret = NULL;
+
+ p_dev = pcmcia_get_dev(_p_dev);
+ if (!p_dev)
+ return NULL;
+
+ if (!p_dev->socket->pcmcia_state.present)
+ goto out;
+
+ if (p_dev->_removed)
+ goto out;
+
+ if (p_dev->suspended)
+ goto out;
+
+ ret = p_dev;
+ out:
+ pcmcia_put_dev(p_dev);
+ return ret;
+}
+EXPORT_SYMBOL(pcmcia_dev_present);
+
+
static struct pcmcia_callback pcmcia_bus_callback = {
.owner = THIS_MODULE,
.event = ds_event,
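
Two things are worth noting in the ds.c changes above. First, pcmcia_device_add() now looks for an existing device on the same card function and shares its function_config through the new kref, allocating a fresh, kref-initialized config_t only when none exists; device_add_lock (now a DEFINE_MUTEX instead of the old DECLARE_MUTEX semaphore) serializes this so only one config_t per function is ever created, and pcmcia_release_dev() drops the reference with kref_put(). Second, the newly exported pcmcia_dev_present() lets 16-bit drivers check that their device is still present, not removed and not suspended, before touching hardware; it takes and drops its own device reference internally. A hedged usage sketch, where my_private and my_driver_poll are hypothetical and not part of this patch:

	static void my_driver_poll(struct my_private *priv)
	{
		/* Bail out if the card has been removed or is suspended. */
		if (!pcmcia_dev_present(priv->p_dev))
			return;

		/* ... safe to touch the hardware here ... */
	}
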
diff --git a/drivers/pcmcia/ds_internal.h b/drivers/pcmcia/ds_internal.h
index d359bd25a51ca..3a2b25e6ed732 100644
--- a/drivers/pcmcia/ds_internal.h
+++ b/drivers/pcmcia/ds_internal.h
@@ -8,6 +8,8 @@ extern void pcmcia_put_dev(struct pcmcia_device *p_dev);
struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int function);
+extern int pcmcia_release_configuration(struct pcmcia_device *p_dev);
+
#ifdef CONFIG_PCMCIA_IOCTL
extern void __init pcmcia_setup_ioctl(void);
extern void __exit pcmcia_cleanup_ioctl(void);
@@ -15,7 +17,7 @@ extern void handle_event(struct pcmcia_socket *s, event_t event);
extern int handle_request(struct pcmcia_socket *s, event_t event);
#else
static inline void __init pcmcia_setup_ioctl(void) { return; }
-static inline void __init pcmcia_cleanup_ioctl(void) { return; }
+static inline void __exit pcmcia_cleanup_ioctl(void) { return; }
static inline void handle_event(struct pcmcia_socket *s, event_t event) { return; }
static inline int handle_request(struct pcmcia_socket *s, event_t event) { return CS_SUCCESS; }
#endif
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 7979c85df3dc6..d5f03a338c6c5 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 35a92d1e4945b..bd0308e898153 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/string.h>
diff --git a/drivers/pcmcia/pcmcia_compat.c b/drivers/pcmcia/pcmcia_compat.c
deleted file mode 100644
index ebb161c4f819f..0000000000000
--- a/drivers/pcmcia/pcmcia_compat.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * PCMCIA 16-bit compatibility functions
- *
- * The initial developer of the original code is David A. Hinds
- * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
- *
- * Copyright (C) 2004 Dominik Brodowski
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/init.h>
-
-#define IN_CARD_SERVICES
-#include <pcmcia/cs_types.h>
-#include <pcmcia/cs.h>
-#include <pcmcia/bulkmem.h>
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
-#include <pcmcia/ss.h>
-
-#include "cs_internal.h"
-
-int pcmcia_get_first_tuple(struct pcmcia_device *p_dev, tuple_t *tuple)
-{
- return pccard_get_first_tuple(p_dev->socket, p_dev->func, tuple);
-}
-EXPORT_SYMBOL(pcmcia_get_first_tuple);
-
-int pcmcia_get_next_tuple(struct pcmcia_device *p_dev, tuple_t *tuple)
-{
- return pccard_get_next_tuple(p_dev->socket, p_dev->func, tuple);
-}
-EXPORT_SYMBOL(pcmcia_get_next_tuple);
-
-int pcmcia_get_tuple_data(struct pcmcia_device *p_dev, tuple_t *tuple)
-{
- return pccard_get_tuple_data(p_dev->socket, tuple);
-}
-EXPORT_SYMBOL(pcmcia_get_tuple_data);
-
-int pcmcia_parse_tuple(struct pcmcia_device *p_dev, tuple_t *tuple, cisparse_t *parse)
-{
- return pccard_parse_tuple(tuple, parse);
-}
-EXPORT_SYMBOL(pcmcia_parse_tuple);
-
-int pcmcia_validate_cis(struct pcmcia_device *p_dev, cisinfo_t *info)
-{
- return pccard_validate_cis(p_dev->socket, p_dev->func, info);
-}
-EXPORT_SYMBOL(pcmcia_validate_cis);
-
-
-int pcmcia_reset_card(struct pcmcia_device *p_dev, client_req_t *req)
-{
- return pccard_reset_card(p_dev->socket);
-}
-EXPORT_SYMBOL(pcmcia_reset_card);
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 80969f7e7a0be..c53db7ceda5e2 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -18,7 +18,6 @@
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -70,10 +69,26 @@ extern int ds_pc_debug;
#define ds_dbg(lvl, fmt, arg...) do { } while (0)
#endif
+static struct pcmcia_device *get_pcmcia_device(struct pcmcia_socket *s,
+ unsigned int function)
+{
+ struct pcmcia_device *p_dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
+ list_for_each_entry(p_dev, &s->devices_list, socket_device_list) {
+ if (p_dev->func == function) {
+ spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
+ return pcmcia_get_dev(p_dev);
+ }
+ }
+ spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
+ return NULL;
+}
/* backwards-compatible accessing of driver --- by name! */
-static struct pcmcia_driver * get_pcmcia_driver (dev_info_t *dev_info)
+static struct pcmcia_driver *get_pcmcia_driver(dev_info_t *dev_info)
{
struct device_driver *drv;
struct pcmcia_driver *p_drv;
@@ -214,7 +229,7 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info)
* by userspace before, we need to
* return the "instance". */
spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
- bind_info->instance = p_dev->instance;
+ bind_info->instance = p_dev;
ret = -EBUSY;
goto err_put_module;
} else {
@@ -253,9 +268,9 @@ rescan:
/*
* Prevent this racing with a card insertion.
*/
- down(&s->skt_sem);
+ mutex_lock(&s->skt_mutex);
bus_rescan_devices(&pcmcia_bus_type);
- up(&s->skt_sem);
+ mutex_unlock(&s->skt_mutex);
/* check whether the driver indeed matched. I don't care if this
* is racy or not, because it can only happen on cardmgr access
@@ -289,6 +304,7 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int
{
dev_node_t *node;
struct pcmcia_device *p_dev;
+ struct pcmcia_driver *p_drv;
unsigned long flags;
int ret = 0;
@@ -343,16 +359,16 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int
found:
spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
- if ((!p_dev->instance) ||
- (p_dev->instance->state & DEV_CONFIG_PENDING)) {
+ p_drv = to_pcmcia_drv(p_dev->dev.driver);
+ if (p_drv && !p_dev->_locked) {
ret = -EAGAIN;
goto err_put;
}
if (first)
- node = p_dev->instance->dev;
+ node = p_dev->dev_node;
else
- for (node = p_dev->instance->dev; node; node = node->next)
+ for (node = p_dev->dev_node; node; node = node->next)
if (node == bind_info->next)
break;
if (!node) {
@@ -583,14 +599,16 @@ static int ds_ioctl(struct inode * inode, struct file * file,
if (buf->config.Function &&
(buf->config.Function >= s->functions))
ret = CS_BAD_ARGS;
- else
- ret = pccard_get_configuration_info(s,
- buf->config.Function, &buf->config);
+ else {
+ struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function);
+ ret = pccard_get_configuration_info(s, p_dev, &buf->config);
+ pcmcia_put_dev(p_dev);
+ }
break;
case DS_GET_FIRST_TUPLE:
- down(&s->skt_sem);
+ mutex_lock(&s->skt_mutex);
pcmcia_validate_mem(s);
- up(&s->skt_sem);
+ mutex_unlock(&s->skt_mutex);
ret = pccard_get_first_tuple(s, BIND_FN_ALL, &buf->tuple);
break;
case DS_GET_NEXT_TUPLE:
@@ -609,16 +627,19 @@ static int ds_ioctl(struct inode * inode, struct file * file,
ret = pccard_reset_card(s);
break;
case DS_GET_STATUS:
- if (buf->status.Function &&
- (buf->status.Function >= s->functions))
- ret = CS_BAD_ARGS;
- else
- ret = pccard_get_status(s, buf->status.Function, &buf->status);
- break;
+ if (buf->status.Function &&
+ (buf->status.Function >= s->functions))
+ ret = CS_BAD_ARGS;
+ else {
+ struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function);
+ ret = pccard_get_status(s, p_dev, &buf->status);
+ pcmcia_put_dev(p_dev);
+ }
+ break;
case DS_VALIDATE_CIS:
- down(&s->skt_sem);
+ mutex_lock(&s->skt_mutex);
pcmcia_validate_mem(s);
- up(&s->skt_sem);
+ mutex_unlock(&s->skt_mutex);
ret = pccard_validate_cis(s, BIND_FN_ALL, &buf->cisinfo);
break;
case DS_SUSPEND_CARD:
@@ -638,12 +659,16 @@ static int ds_ioctl(struct inode * inode, struct file * file,
err = -EPERM;
goto free_out;
}
- if (buf->conf_reg.Function &&
- (buf->conf_reg.Function >= s->functions))
- ret = CS_BAD_ARGS;
- else
- ret = pccard_access_configuration_register(s,
- buf->conf_reg.Function, &buf->conf_reg);
+
+ ret = CS_BAD_ARGS;
+
+ if (!(buf->conf_reg.Function &&
+ (buf->conf_reg.Function >= s->functions))) {
+ struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function);
+ if (p_dev)
+ ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg);
+ pcmcia_put_dev(p_dev);
+ }
break;
case DS_GET_FIRST_REGION:
case DS_GET_NEXT_REGION:
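
The ioctl paths above no longer hand a bare function number to the core. The new static helper get_pcmcia_device() walks the socket's device list under pcmcia_dev_list_lock and returns the matching pcmcia_device with a reference held via pcmcia_get_dev(); every caller pairs that lookup with pcmcia_put_dev() once it is done, and the DS_GET_CONFIGURATION_INFO and DS_GET_STATUS paths rely on the put being safe even when the lookup returned NULL. A reduced sketch of the lookup/use/put pattern; example_get_status() is invented for illustration and assumes it lives alongside the helper in pcmcia_ioctl.c:

	static int example_get_status(struct pcmcia_socket *s, unsigned int function,
				      cs_status_t *status)
	{
		struct pcmcia_device *p_dev = get_pcmcia_device(s, function);
		int ret;

		ret = pccard_get_status(s, p_dev, status);	/* p_dev may be NULL */
		pcmcia_put_dev(p_dev);				/* drop the lookup reference */
		return ret;
	}
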
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 89022ad5b5207..45063b4e5b780 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -14,7 +14,6 @@
*
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -89,7 +88,7 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr, ioaddr_t *base,
}
if ((s->features & SS_CAP_STATIC_MAP) && s->io_offset) {
*base = s->io_offset | (*base & 0x0fff);
- s->io[0].Attributes = attr;
+ s->io[0].res->flags = (s->io[0].res->flags & ~IORESOURCE_BITS) | (attr & IORESOURCE_BITS);
return 0;
}
/* Check for an already-allocated window that must conflict with
@@ -97,38 +96,36 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr, ioaddr_t *base,
* potential conflicts, just the most obvious ones.
*/
for (i = 0; i < MAX_IO_WIN; i++)
- if ((s->io[i].NumPorts != 0) &&
- ((s->io[i].BasePort & (align-1)) == *base))
+ if ((s->io[i].res) &&
+ ((s->io[i].res->start & (align-1)) == *base))
return 1;
for (i = 0; i < MAX_IO_WIN; i++) {
- if (s->io[i].NumPorts == 0) {
+ if (!s->io[i].res) {
s->io[i].res = pcmcia_find_io_region(*base, num, align, s);
if (s->io[i].res) {
- s->io[i].Attributes = attr;
- s->io[i].BasePort = *base = s->io[i].res->start;
- s->io[i].NumPorts = s->io[i].InUse = num;
+ *base = s->io[i].res->start;
+ s->io[i].res->flags = (s->io[i].res->flags & ~IORESOURCE_BITS) | (attr & IORESOURCE_BITS);
+ s->io[i].InUse = num;
break;
} else
return 1;
- } else if (s->io[i].Attributes != attr)
+ } else if ((s->io[i].res->flags & IORESOURCE_BITS) != (attr & IORESOURCE_BITS))
continue;
/* Try to extend top of window */
- try = s->io[i].BasePort + s->io[i].NumPorts;
+ try = s->io[i].res->end + 1;
if ((*base == 0) || (*base == try))
if (pcmcia_adjust_io_region(s->io[i].res, s->io[i].res->start,
s->io[i].res->end + num, s) == 0) {
*base = try;
- s->io[i].NumPorts += num;
s->io[i].InUse += num;
break;
}
/* Try to extend bottom of window */
- try = s->io[i].BasePort - num;
+ try = s->io[i].res->start - num;
if ((*base == 0) || (*base == try))
if (pcmcia_adjust_io_region(s->io[i].res, s->io[i].res->start - num,
s->io[i].res->end, s) == 0) {
- s->io[i].BasePort = *base = try;
- s->io[i].NumPorts += num;
+ *base = try;
s->io[i].InUse += num;
break;
}
@@ -143,12 +140,13 @@ static void release_io_space(struct pcmcia_socket *s, ioaddr_t base,
int i;
for (i = 0; i < MAX_IO_WIN; i++) {
- if ((s->io[i].BasePort <= base) &&
- (s->io[i].BasePort+s->io[i].NumPorts >= base+num)) {
+ if (!s->io[i].res)
+ continue;
+ if ((s->io[i].res->start <= base) &&
+ (s->io[i].res->end >= base+num-1)) {
s->io[i].InUse -= num;
/* Free the window if no one else is using it */
if (s->io[i].InUse == 0) {
- s->io[i].NumPorts = 0;
release_resource(s->io[i].res);
kfree(s->io[i].res);
s->io[i].res = NULL;
@@ -165,21 +163,19 @@ static void release_io_space(struct pcmcia_socket *s, ioaddr_t base,
* this and the tuple reading services.
*/
-int pccard_access_configuration_register(struct pcmcia_socket *s,
- unsigned int function,
+int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
conf_reg_t *reg)
{
+ struct pcmcia_socket *s;
config_t *c;
int addr;
u_char val;
- if (!s || !s->config)
+ if (!p_dev || !p_dev->function_config)
return CS_NO_CARD;
- c = &s->config[function];
-
- if (c == NULL)
- return CS_NO_CARD;
+ s = p_dev->socket;
+ c = p_dev->function_config;
if (!(c->state & CONFIG_LOCKED))
return CS_CONFIGURATION_LOCKED;
@@ -200,20 +196,12 @@ int pccard_access_configuration_register(struct pcmcia_socket *s,
break;
}
return CS_SUCCESS;
-} /* pccard_access_configuration_register */
-
-int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
- conf_reg_t *reg)
-{
- return pccard_access_configuration_register(p_dev->socket,
- p_dev->func, reg);
-}
+} /* pcmcia_access_configuration_register */
EXPORT_SYMBOL(pcmcia_access_configuration_register);
-
int pccard_get_configuration_info(struct pcmcia_socket *s,
- unsigned int function,
+ struct pcmcia_device *p_dev,
config_info_t *config)
{
config_t *c;
@@ -221,7 +209,7 @@ int pccard_get_configuration_info(struct pcmcia_socket *s,
if (!(s->state & SOCKET_PRESENT))
return CS_NO_CARD;
- config->Function = function;
+ config->Function = p_dev->func;
#ifdef CONFIG_CARDBUS
if (s->state & SOCKET_CARDBUS) {
@@ -235,14 +223,14 @@ int pccard_get_configuration_info(struct pcmcia_socket *s,
config->AssignedIRQ = s->irq.AssignedIRQ;
if (config->AssignedIRQ)
config->Attributes |= CONF_ENABLE_IRQ;
- config->BasePort1 = s->io[0].BasePort;
- config->NumPorts1 = s->io[0].NumPorts;
+ config->BasePort1 = s->io[0].res->start;
+ config->NumPorts1 = s->io[0].res->end - config->BasePort1 + 1;
}
return CS_SUCCESS;
}
#endif
- c = (s->config != NULL) ? &s->config[function] : NULL;
+ c = (p_dev) ? p_dev->function_config : NULL;
if ((c == NULL) || !(c->state & CONFIG_LOCKED)) {
config->Attributes = 0;
@@ -271,7 +259,7 @@ int pccard_get_configuration_info(struct pcmcia_socket *s,
int pcmcia_get_configuration_info(struct pcmcia_device *p_dev,
config_info_t *config)
{
- return pccard_get_configuration_info(p_dev->socket, p_dev->func,
+ return pccard_get_configuration_info(p_dev->socket, p_dev,
config);
}
EXPORT_SYMBOL(pcmcia_get_configuration_info);
@@ -317,7 +305,7 @@ EXPORT_SYMBOL(pcmcia_get_window);
* SocketState yet: I haven't seen any point for it.
*/
-int pccard_get_status(struct pcmcia_socket *s, unsigned int function,
+int pccard_get_status(struct pcmcia_socket *s, struct pcmcia_device *p_dev,
cs_status_t *status)
{
config_t *c;
@@ -334,11 +322,12 @@ int pccard_get_status(struct pcmcia_socket *s, unsigned int function,
if (!(s->state & SOCKET_PRESENT))
return CS_NO_CARD;
- c = (s->config != NULL) ? &s->config[function] : NULL;
+ c = (p_dev) ? p_dev->function_config : NULL;
+
if ((c != NULL) && (c->state & CONFIG_LOCKED) &&
(c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) {
u_char reg;
- if (c->Present & PRESENT_PIN_REPLACE) {
+ if (c->CardValues & PRESENT_PIN_REPLACE) {
pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg);
status->CardState |=
(reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0;
@@ -352,7 +341,7 @@ int pccard_get_status(struct pcmcia_socket *s, unsigned int function,
/* No PRR? Then assume we're always ready */
status->CardState |= CS_EVENT_READY_CHANGE;
}
- if (c->Present & PRESENT_EXT_STATUS) {
+ if (c->CardValues & PRESENT_EXT_STATUS) {
pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg);
status->CardState |=
(reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0;
@@ -370,11 +359,9 @@ int pccard_get_status(struct pcmcia_socket *s, unsigned int function,
return CS_SUCCESS;
} /* pccard_get_status */
-int pcmcia_get_status(client_handle_t handle, cs_status_t *status)
+int pcmcia_get_status(struct pcmcia_device *p_dev, cs_status_t *status)
{
- struct pcmcia_socket *s;
- s = SOCKET(handle);
- return pccard_get_status(s, handle->func, status);
+ return pccard_get_status(p_dev->socket, p_dev, status);
}
EXPORT_SYMBOL(pcmcia_get_status);
@@ -422,7 +409,8 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
config_t *c;
s = p_dev->socket;
- c = CONFIG(p_dev);
+ c = p_dev->function_config;
+
if (!(s->state & SOCKET_PRESENT))
return CS_NO_CARD;
if (!(c->state & CONFIG_LOCKED))
@@ -454,6 +442,28 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
(mod->Attributes & CONF_VPP2_CHANGE_VALID))
return CS_BAD_VPP;
+ if (mod->Attributes & CONF_IO_CHANGE_WIDTH) {
+ pccard_io_map io_off = { 0, 0, 0, 0, 1 };
+ pccard_io_map io_on;
+ int i;
+
+ io_on.speed = io_speed;
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ if (!s->io[i].res)
+ continue;
+ io_off.map = i;
+ io_on.map = i;
+
+ io_on.flags = MAP_ACTIVE | IO_DATA_PATH_WIDTH_8;
+ io_on.start = s->io[i].res->start;
+ io_on.stop = s->io[i].res->end;
+
+ s->ops->set_io_map(s, &io_off);
+ mdelay(40);
+ s->ops->set_io_map(s, &io_on);
+ }
+ }
+
return CS_SUCCESS;
} /* modify_configuration */
EXPORT_SYMBOL(pcmcia_modify_configuration);
@@ -463,23 +473,23 @@ int pcmcia_release_configuration(struct pcmcia_device *p_dev)
{
pccard_io_map io = { 0, 0, 0, 0, 1 };
struct pcmcia_socket *s = p_dev->socket;
+ config_t *c = p_dev->function_config;
int i;
- if (!(p_dev->state & CLIENT_CONFIG_LOCKED))
- return CS_BAD_HANDLE;
- p_dev->state &= ~CLIENT_CONFIG_LOCKED;
-
- if (!(p_dev->state & CLIENT_STALE)) {
- config_t *c = CONFIG(p_dev);
+ if (p_dev->_locked) {
+ p_dev->_locked = 0;
if (--(s->lock_count) == 0) {
s->socket.flags = SS_OUTPUT_ENA; /* Is this correct? */
s->socket.Vpp = 0;
s->socket.io_irq = 0;
s->ops->set_socket(s, &s->socket);
}
+ }
+ if (c->state & CONFIG_LOCKED) {
+ c->state &= ~CONFIG_LOCKED;
if (c->state & CONFIG_IO_REQ)
for (i = 0; i < MAX_IO_WIN; i++) {
- if (s->io[i].NumPorts == 0)
+ if (!s->io[i].res)
continue;
s->io[i].Config--;
if (s->io[i].Config != 0)
@@ -487,12 +497,10 @@ int pcmcia_release_configuration(struct pcmcia_device *p_dev)
io.map = i;
s->ops->set_io_map(s, &io);
}
- c->state &= ~CONFIG_LOCKED;
}
return CS_SUCCESS;
} /* pcmcia_release_configuration */
-EXPORT_SYMBOL(pcmcia_release_configuration);
/** pcmcia_release_io
@@ -503,25 +511,23 @@ EXPORT_SYMBOL(pcmcia_release_configuration);
* don't bother checking the port ranges against the current socket
* values.
*/
-int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
+static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
{
struct pcmcia_socket *s = p_dev->socket;
+ config_t *c = p_dev->function_config;
- if (!(p_dev->state & CLIENT_IO_REQ))
+ if (!p_dev->_io)
return CS_BAD_HANDLE;
- p_dev->state &= ~CLIENT_IO_REQ;
-
- if (!(p_dev->state & CLIENT_STALE)) {
- config_t *c = CONFIG(p_dev);
- if (c->state & CONFIG_LOCKED)
- return CS_CONFIGURATION_LOCKED;
- if ((c->io.BasePort1 != req->BasePort1) ||
- (c->io.NumPorts1 != req->NumPorts1) ||
- (c->io.BasePort2 != req->BasePort2) ||
- (c->io.NumPorts2 != req->NumPorts2))
- return CS_BAD_ARGS;
- c->state &= ~CONFIG_IO_REQ;
- }
+
+ p_dev->_io = 0;
+
+ if ((c->io.BasePort1 != req->BasePort1) ||
+ (c->io.NumPorts1 != req->NumPorts1) ||
+ (c->io.BasePort2 != req->BasePort2) ||
+ (c->io.NumPorts2 != req->NumPorts2))
+ return CS_BAD_ARGS;
+
+ c->state &= ~CONFIG_IO_REQ;
release_io_space(s, req->BasePort1, req->NumPorts1);
if (req->NumPorts2)
@@ -529,28 +535,26 @@ int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
return CS_SUCCESS;
} /* pcmcia_release_io */
-EXPORT_SYMBOL(pcmcia_release_io);
-int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
+static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
{
struct pcmcia_socket *s = p_dev->socket;
- if (!(p_dev->state & CLIENT_IRQ_REQ))
+ config_t *c = p_dev->function_config;
+
+ if (!p_dev->_irq)
return CS_BAD_HANDLE;
- p_dev->state &= ~CLIENT_IRQ_REQ;
-
- if (!(p_dev->state & CLIENT_STALE)) {
- config_t *c = CONFIG(p_dev);
- if (c->state & CONFIG_LOCKED)
- return CS_CONFIGURATION_LOCKED;
- if (c->irq.Attributes != req->Attributes)
- return CS_BAD_ATTRIBUTE;
- if (s->irq.AssignedIRQ != req->AssignedIRQ)
- return CS_BAD_IRQ;
- if (--s->irq.Config == 0) {
- c->state &= ~CONFIG_IRQ_REQ;
- s->irq.AssignedIRQ = 0;
- }
+ p_dev->_irq = 0;
+
+ if (c->state & CONFIG_LOCKED)
+ return CS_CONFIGURATION_LOCKED;
+ if (c->irq.Attributes != req->Attributes)
+ return CS_BAD_ATTRIBUTE;
+ if (s->irq.AssignedIRQ != req->AssignedIRQ)
+ return CS_BAD_IRQ;
+ if (--s->irq.Config == 0) {
+ c->state &= ~CONFIG_IRQ_REQ;
+ s->irq.AssignedIRQ = 0;
}
if (req->Attributes & IRQ_HANDLE_PRESENT) {
@@ -563,7 +567,6 @@ int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
return CS_SUCCESS;
} /* pcmcia_release_irq */
-EXPORT_SYMBOL(pcmcia_release_irq);
int pcmcia_release_window(window_handle_t win)
@@ -573,7 +576,7 @@ int pcmcia_release_window(window_handle_t win)
if ((win == NULL) || (win->magic != WINDOW_MAGIC))
return CS_BAD_HANDLE;
s = win->sock;
- if (!(win->handle->state & CLIENT_WIN_REQ(win->index)))
+ if (!(win->handle->_win & CLIENT_WIN_REQ(win->index)))
return CS_BAD_HANDLE;
/* Shut down memory window */
@@ -587,7 +590,7 @@ int pcmcia_release_window(window_handle_t win)
kfree(win->ctl.res);
win->ctl.res = NULL;
}
- win->handle->state &= ~CLIENT_WIN_REQ(win->index);
+ win->handle->_win &= ~CLIENT_WIN_REQ(win->index);
win->magic = 0;
@@ -610,16 +613,12 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
if (req->IntType & INT_CARDBUS)
return CS_UNSUPPORTED_MODE;
- c = CONFIG(p_dev);
+ c = p_dev->function_config;
if (c->state & CONFIG_LOCKED)
return CS_CONFIGURATION_LOCKED;
/* Do power control. We don't allow changes in Vcc. */
- if (s->socket.Vcc != req->Vcc)
- return CS_BAD_VCC;
- if (req->Vpp1 != req->Vpp2)
- return CS_BAD_VPP;
- s->socket.Vpp = req->Vpp1;
+ s->socket.Vpp = req->Vpp;
if (s->ops->set_socket(s, &s->socket))
return CS_BAD_VPP;
@@ -643,7 +642,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
/* Set up CIS configuration registers */
base = c->ConfigBase = req->ConfigBase;
- c->Present = c->CardValues = req->Present;
+ c->CardValues = req->Present;
if (req->Present & PRESENT_COPY) {
c->Copy = req->Copy;
pcmcia_write_cis_mem(s, 1, (base + CISREG_SCR)>>1, 1, &c->Copy);
@@ -690,10 +689,10 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
if (c->state & CONFIG_IO_REQ) {
iomap.speed = io_speed;
for (i = 0; i < MAX_IO_WIN; i++)
- if (s->io[i].NumPorts != 0) {
+ if (s->io[i].res) {
iomap.map = i;
iomap.flags = MAP_ACTIVE;
- switch (s->io[i].Attributes & IO_DATA_PATH_WIDTH) {
+ switch (s->io[i].res->flags & IO_DATA_PATH_WIDTH) {
case IO_DATA_PATH_WIDTH_16:
iomap.flags |= MAP_16BIT; break;
case IO_DATA_PATH_WIDTH_AUTO:
@@ -701,15 +700,15 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
default:
break;
}
- iomap.start = s->io[i].BasePort;
- iomap.stop = iomap.start + s->io[i].NumPorts - 1;
+ iomap.start = s->io[i].res->start;
+ iomap.stop = s->io[i].res->end;
s->ops->set_io_map(s, &iomap);
s->io[i].Config++;
}
}
c->state |= CONFIG_LOCKED;
- p_dev->state |= CLIENT_CONFIG_LOCKED;
+ p_dev->_locked = 1;
return CS_SUCCESS;
} /* pcmcia_request_configuration */
EXPORT_SYMBOL(pcmcia_request_configuration);
@@ -730,7 +729,7 @@ int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req)
if (!req)
return CS_UNSUPPORTED_MODE;
- c = CONFIG(p_dev);
+ c = p_dev->function_config;
if (c->state & CONFIG_LOCKED)
return CS_CONFIGURATION_LOCKED;
if (c->state & CONFIG_IO_REQ)
@@ -755,7 +754,7 @@ int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req)
c->io = *req;
c->state |= CONFIG_IO_REQ;
- p_dev->state |= CLIENT_IO_REQ;
+ p_dev->_io = 1;
return CS_SUCCESS;
} /* pcmcia_request_io */
EXPORT_SYMBOL(pcmcia_request_io);
@@ -786,7 +785,7 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
if (!(s->state & SOCKET_PRESENT))
return CS_NO_CARD;
- c = CONFIG(p_dev);
+ c = p_dev->function_config;
if (c->state & CONFIG_LOCKED)
return CS_CONFIGURATION_LOCKED;
if (c->state & CONFIG_IRQ_REQ)
@@ -851,7 +850,7 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
s->irq.Config++;
c->state |= CONFIG_IRQ_REQ;
- p_dev->state |= CLIENT_IRQ_REQ;
+ p_dev->_irq = 1;
#ifdef CONFIG_PCMCIA_PROBE
pcmcia_used_irq[irq]++;
@@ -911,7 +910,7 @@ int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_h
if (!win->ctl.res)
return CS_IN_USE;
}
- (*p_dev)->state |= CLIENT_WIN_REQ(w);
+ (*p_dev)->_win |= CLIENT_WIN_REQ(w);
/* Configure the socket controller */
win->ctl.map = w+1;
@@ -941,3 +940,14 @@ int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_h
return CS_SUCCESS;
} /* pcmcia_request_window */
EXPORT_SYMBOL(pcmcia_request_window);
+
+void pcmcia_disable_device(struct pcmcia_device *p_dev) {
+ pcmcia_release_configuration(p_dev);
+ pcmcia_release_io(p_dev, &p_dev->io);
+ pcmcia_release_irq(p_dev, &p_dev->irq);
+ if (p_dev->win)
+ pcmcia_release_window(p_dev->win);
+
+ p_dev->dev_node = NULL;
+}
+EXPORT_SYMBOL(pcmcia_disable_device);
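
pcmcia_resource.c also introduces pcmcia_disable_device(), which bundles the teardown steps a client driver used to perform by hand; with pcmcia_release_io() and pcmcia_release_irq() now static, drivers are expected to call this single helper from their release path. A hedged sketch of a hypothetical driver's release function (my_driver_release is illustrative only):

	static void my_driver_release(struct pcmcia_device *link)
	{
		/* Releases the locked configuration, the requested I/O ranges
		 * and IRQ, and any memory window, then clears dev_node. */
		pcmcia_disable_device(link);
	}
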
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index f2789afb22b24..16d1ea7b0a18a 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index 514609369836e..81dfc2cac2b4f 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -12,7 +12,6 @@
* (C) 1999 David A. Hinds
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -22,6 +21,8 @@
#include "cs_internal.h"
+#ifdef CONFIG_PCMCIA_IOCTL
+
#ifdef CONFIG_PCMCIA_PROBE
static int adjust_irq(struct pcmcia_socket *s, adjust_t *adj)
@@ -98,6 +99,8 @@ int pcmcia_adjust_resource_info(adjust_t *adj)
}
EXPORT_SYMBOL(pcmcia_adjust_resource_info);
+#endif
+
int pcmcia_validate_mem(struct pcmcia_socket *s)
{
if (s->resource_ops->validate_mem)
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 5301ac60358f7..0f8b157c97177 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -12,7 +12,6 @@
* (C) 1999 David A. Hinds
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -61,7 +60,7 @@ struct socket_data {
unsigned int rsrc_mem_probe;
};
-static DECLARE_MUTEX(rsrc_sem);
+static DEFINE_MUTEX(rsrc_mutex);
#define MEM_PROBE_LOW (1 << 0)
#define MEM_PROBE_HIGH (1 << 1)
@@ -484,7 +483,7 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
/*
- * Locking note: Must be called with skt_sem held!
+ * Locking note: Must be called with skt_mutex held!
*/
static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s)
{
@@ -495,7 +494,7 @@ static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s)
if (!probe_mem)
return 0;
- down(&rsrc_sem);
+ mutex_lock(&rsrc_mutex);
if (s->features & SS_CAP_PAGE_REGS)
probe_mask = MEM_PROBE_HIGH;
@@ -507,7 +506,7 @@ static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s)
s_data->rsrc_mem_probe |= probe_mask;
}
- up(&rsrc_sem);
+ mutex_unlock(&rsrc_mutex);
return ret;
}
@@ -585,7 +584,7 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
struct socket_data *s_data = s->resource_data;
int ret = -ENOMEM;
- down(&rsrc_sem);
+ mutex_lock(&rsrc_mutex);
for (m = s_data->io_db.next; m != &s_data->io_db; m = m->next) {
unsigned long start = m->base;
unsigned long end = m->base + m->num - 1;
@@ -596,7 +595,7 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
ret = adjust_resource(res, r_start, r_end - r_start + 1);
break;
}
- up(&rsrc_sem);
+ mutex_unlock(&rsrc_mutex);
return ret;
}
@@ -630,7 +629,7 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
data.offset = base & data.mask;
data.map = &s_data->io_db;
- down(&rsrc_sem);
+ mutex_lock(&rsrc_mutex);
#ifdef CONFIG_PCI
if (s->cb_dev) {
ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
@@ -639,7 +638,7 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
#endif
ret = allocate_resource(&ioport_resource, res, num, min, ~0UL,
1, pcmcia_align, &data);
- up(&rsrc_sem);
+ mutex_unlock(&rsrc_mutex);
if (ret != 0) {
kfree(res);
@@ -672,7 +671,7 @@ static struct resource * nonstatic_find_mem_region(u_long base, u_long num,
min = 0x100000UL + base;
}
- down(&rsrc_sem);
+ mutex_lock(&rsrc_mutex);
#ifdef CONFIG_PCI
if (s->cb_dev) {
ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num,
@@ -682,7 +681,7 @@ static struct resource * nonstatic_find_mem_region(u_long base, u_long num,
#endif
ret = allocate_resource(&iomem_resource, res, num, min,
max, 1, pcmcia_align, &data);
- up(&rsrc_sem);
+ mutex_unlock(&rsrc_mutex);
if (ret == 0 || low)
break;
low = 1;
@@ -705,7 +704,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned
if (end < start)
return -EINVAL;
- down(&rsrc_sem);
+ mutex_lock(&rsrc_mutex);
switch (action) {
case ADD_MANAGED_RESOURCE:
ret = add_interval(&data->mem_db, start, size);
@@ -723,7 +722,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned
default:
ret = -EINVAL;
}
- up(&rsrc_sem);
+ mutex_unlock(&rsrc_mutex);
return ret;
}
@@ -741,7 +740,7 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
if (end > IO_SPACE_LIMIT)
return -EINVAL;
- down(&rsrc_sem);
+ mutex_lock(&rsrc_mutex);
switch (action) {
case ADD_MANAGED_RESOURCE:
if (add_interval(&data->io_db, start, size) != 0) {
@@ -760,7 +759,7 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
ret = -EINVAL;
break;
}
- up(&rsrc_sem);
+ mutex_unlock(&rsrc_mutex);
return ret;
}
@@ -867,7 +866,7 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
struct socket_data *data = s->resource_data;
struct resource_map *p, *q;
- down(&rsrc_sem);
+ mutex_lock(&rsrc_mutex);
for (p = data->mem_db.next; p != &data->mem_db; p = q) {
q = p->next;
kfree(p);
@@ -876,7 +875,7 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
q = p->next;
kfree(p);
}
- up(&rsrc_sem);
+ mutex_unlock(&rsrc_mutex);
}
@@ -901,7 +900,7 @@ static ssize_t show_io_db(struct class_device *class_dev, char *buf)
struct resource_map *p;
ssize_t ret = 0;
- down(&rsrc_sem);
+ mutex_lock(&rsrc_mutex);
data = s->resource_data;
for (p = data->io_db.next; p != &data->io_db; p = p->next) {
@@ -913,7 +912,7 @@ static ssize_t show_io_db(struct class_device *class_dev, char *buf)
((unsigned long) p->base + p->num - 1));
}
- up(&rsrc_sem);
+ mutex_unlock(&rsrc_mutex);
return (ret);
}
@@ -953,7 +952,7 @@ static ssize_t show_mem_db(struct class_device *class_dev, char *buf)
struct resource_map *p;
ssize_t ret = 0;
- down(&rsrc_sem);
+ mutex_lock(&rsrc_mutex);
data = s->resource_data;
for (p = data->mem_db.next; p != &data->mem_db; p = p->next) {
@@ -965,7 +964,7 @@ static ssize_t show_mem_db(struct class_device *class_dev, char *buf)
((unsigned long) p->base + p->num - 1));
}
- up(&rsrc_sem);
+ mutex_unlock(&rsrc_mutex);
return (ret);
}
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c
index 2b3c2895b43d8..eb89928f2338a 100644
--- a/drivers/pcmcia/sa1100_cerf.c
+++ b/drivers/pcmcia/sa1100_cerf.c
@@ -5,7 +5,6 @@
* Based off the Assabet.
*
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index 5ab1cdef7c48c..c5d7476da4712 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -12,7 +12,6 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/config.h>
#include <linux/string.h>
#include <linux/major.h>
#include <linux/errno.h>
@@ -25,6 +24,7 @@
#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/device.h>
+#include <linux/mutex.h>
#include <asm/system.h>
#include <asm/irq.h>
@@ -183,7 +183,7 @@ static ssize_t pccard_store_resource(struct class_device *dev, const char *buf,
s->resource_setup_done = 1;
spin_unlock_irqrestore(&s->lock, flags);
- down(&s->skt_sem);
+ mutex_lock(&s->skt_mutex);
if ((s->callback) &&
(s->state & SOCKET_PRESENT) &&
!(s->state & SOCKET_CARDBUS)) {
@@ -192,7 +192,7 @@ static ssize_t pccard_store_resource(struct class_device *dev, const char *buf,
module_put(s->callback->owner);
}
}
- up(&s->skt_sem);
+ mutex_unlock(&s->skt_mutex);
return count;
}
@@ -322,7 +322,7 @@ static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, siz
kfree(cis);
if (!ret) {
- down(&s->skt_sem);
+ mutex_lock(&s->skt_mutex);
if ((s->callback) && (s->state & SOCKET_PRESENT) &&
!(s->state & SOCKET_CARDBUS)) {
if (try_module_get(s->callback->owner)) {
@@ -330,7 +330,7 @@ static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, siz
module_put(s->callback->owner);
}
}
- up(&s->skt_sem);
+ mutex_unlock(&s->skt_mutex);
}
diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h
index d5b4ff74462e9..7a3d1b8e16b92 100644
--- a/drivers/pcmcia/ti113x.h
+++ b/drivers/pcmcia/ti113x.h
@@ -30,7 +30,6 @@
#ifndef _LINUX_TI113X_H
#define _LINUX_TI113X_H
-#include <linux/config.h>
/* Register definitions for TI 113X PCI-to-CardBus bridges */
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index 0574efd7828ac..459e6e1946fd5 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -634,7 +634,7 @@ static void vrc4171_remove_sockets(void)
static int __devinit vrc4171_card_setup(char *options)
{
if (options == NULL || *options == '\0')
- return 0;
+ return 1;
if (strncmp(options, "irq:", 4) == 0) {
int irq;
@@ -644,7 +644,7 @@ static int __devinit vrc4171_card_setup(char *options)
vrc4171_irq = irq;
if (*options != ',')
- return 0;
+ return 1;
options++;
}
@@ -663,10 +663,10 @@ static int __devinit vrc4171_card_setup(char *options)
}
if (*options != ',')
- return 0;
+ return 1;
options++;
} else
- return 0;
+ return 1;
}
@@ -688,7 +688,7 @@ static int __devinit vrc4171_card_setup(char *options)
}
if (*options != ',')
- return 0;
+ return 1;
options++;
if (strncmp(options, "memnoprobe", 10) == 0)
@@ -700,7 +700,7 @@ static int __devinit vrc4171_card_setup(char *options)
}
}
- return 0;
+ return 1;
}
__setup("vrc4171_card=", vrc4171_card_setup);
diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c
index 57f38dba0a489..6004196f7cc10 100644
--- a/drivers/pcmcia/vrc4173_cardu.c
+++ b/drivers/pcmcia/vrc4173_cardu.c
@@ -516,7 +516,7 @@ static int __devinit vrc4173_cardu_probe(struct pci_dev *dev,
static int __devinit vrc4173_cardu_setup(char *options)
{
if (options == NULL || *options == '\0')
- return 0;
+ return 1;
if (strncmp(options, "cardu1:", 7) == 0) {
options += 7;
@@ -527,9 +527,9 @@ static int __devinit vrc4173_cardu_setup(char *options)
}
if (*options != ',')
- return 0;
+ return 1;
} else
- return 0;
+ return 1;
}
if (strncmp(options, "cardu2:", 7) == 0) {
@@ -538,7 +538,7 @@ static int __devinit vrc4173_cardu_setup(char *options)
cardu_sockets[CARDU2].noprobe = 1;
}
- return 0;
+ return 1;
}
__setup("vrc4173_cardu=", vrc4173_cardu_setup);
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 8fd71ab02ef05..b842377cb0c6c 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -32,9 +32,8 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
int size;
/* Sanity checks */
- if ( magic == NULL || datasize > PAGE_SIZE ||
- (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
- BUG();
+ BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
+ (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
if (cplength > 0)
@@ -125,8 +124,7 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
struct dasd_device *device;
int success;
- if (cqr->refers == NULL || cqr->function == NULL)
- BUG();
+ BUG_ON(cqr->refers == NULL || cqr->function == NULL);
device = cqr->device;
success = cqr->status == DASD_CQR_DONE;
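
The dasd_erp.c hunks, like the tape_block.c and lcs.c ones below, replace open-coded "if (cond) BUG();" sequences with BUG_ON(cond), the more compact standard kernel idiom for such assertions. A minimal before/after sketch (ptr is a placeholder):

	/* before */
	if (ptr == NULL)
		BUG();

	/* after: same behaviour, standard idiom */
	BUG_ON(ptr == NULL);
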
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index ac10dfb20a624..91e93c78f57a0 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -24,7 +24,7 @@
/*
* The room for the SCCB (only for writing) is not equal to a pages size
- * (as it is specified as the maximum size in the the SCLP ducumentation)
+ * (as it is specified as the maximum size in the the SCLP documentation)
* because of the additional data structure described above.
*/
#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 5ced2725d6c70..5c65cf3e5cc02 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -198,9 +198,7 @@ tapeblock_request_fn(request_queue_t *queue)
device = (struct tape_device *) queue->queuedata;
DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
- if (device == NULL)
- BUG();
-
+ BUG_ON(device == NULL);
tapeblock_trigger_requeue(device);
}
@@ -307,8 +305,7 @@ tapeblock_revalidate_disk(struct gendisk *disk)
int rc;
device = (struct tape_device *) disk->private_data;
- if (!device)
- BUG();
+ BUG_ON(!device);
if (!device->blk_data.medium_changed)
return 0;
@@ -440,11 +437,9 @@ tapeblock_ioctl(
rc = 0;
disk = inode->i_bdev->bd_disk;
- if (!disk)
- BUG();
+ BUG_ON(!disk);
device = disk->private_data;
- if (!device)
- BUG();
+ BUG_ON(!device);
minor = iminor(inode);
DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index edcf05d5d568a..5d6b7a57b02f1 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -675,9 +675,8 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
int index, rc;
LCS_DBF_TEXT(5, trace, "rdybuff");
- if (buffer->state != BUF_STATE_LOCKED &&
- buffer->state != BUF_STATE_PROCESSED)
- BUG();
+ BUG_ON(buffer->state != BUF_STATE_LOCKED &&
+ buffer->state != BUF_STATE_PROCESSED);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
buffer->state = BUF_STATE_READY;
index = buffer - channel->iob;
@@ -701,8 +700,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
int index, prev, next;
LCS_DBF_TEXT(5, trace, "prcsbuff");
- if (buffer->state != BUF_STATE_READY)
- BUG();
+ BUG_ON(buffer->state != BUF_STATE_READY);
buffer->state = BUF_STATE_PROCESSED;
index = buffer - channel->iob;
prev = (index - 1) & (LCS_NUM_BUFFS - 1);
@@ -734,9 +732,8 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
unsigned long flags;
LCS_DBF_TEXT(5, trace, "relbuff");
- if (buffer->state != BUF_STATE_LOCKED &&
- buffer->state != BUF_STATE_PROCESSED)
- BUG();
+ BUG_ON(buffer->state != BUF_STATE_LOCKED &&
+ buffer->state != BUF_STATE_PROCESSED);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
buffer->state = BUF_STATE_EMPTY;
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index ffba65656a838..1bd82c4e52a0d 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -293,6 +293,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
board_ahci }, /* JMicron JMB360 */
{ 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* JMicron JMB363 */
+ { PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_ahci }, /* ATI SB600 non-raid */
+ { PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_ahci }, /* ATI SB600 raid */
{ } /* terminate list */
};
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index 6c2c395554ff3..5517da5855f00 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -86,7 +86,7 @@ config AIC7XXX_DEBUG_MASK
default "0"
help
Bit mask of debug options that is only valid if the
- CONFIG_AIC7XXX_DEBUG_ENBLE option is enabled. The bits in this mask
+ CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled. The bits in this mask
are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the
variable ahc_debug in that file to find them.
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 2d5be84d8bd4e..24e71b5551722 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -301,7 +301,7 @@ static struct piix_map_db ich6_map_db = {
.mask = 0x3,
.map = {
/* PM PS SM SS MAP */
- { P0, P1, P2, P3 }, /* 00b */
+ { P0, P2, P1, P3 }, /* 00b */
{ IDE, IDE, P1, P3 }, /* 01b */
{ P0, P2, IDE, IDE }, /* 10b */
{ RV, RV, RV, RV },
@@ -312,7 +312,7 @@ static struct piix_map_db ich6m_map_db = {
.mask = 0x3,
.map = {
/* PM PS SM SS MAP */
- { P0, P1, P2, P3 }, /* 00b */
+ { P0, P2, RV, RV }, /* 00b */
{ RV, RV, RV, RV },
{ P0, P2, IDE, IDE }, /* 10b */
{ RV, RV, RV, RV },
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 3a8462e8d0635..24eb59e143a95 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -2488,7 +2488,7 @@ static int option_setup(char *str)
}
ints[0] = i - 1;
internal_ibmmca_scsi_setup(cur, ints);
- return 0;
+ return 1;
}
__setup("ibmmcascsi=", option_setup);
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index f47dd87c05e75..892e8ed630915 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -80,7 +80,7 @@ void ibmvscsi_release_crq_queue(struct crq_queue *queue,
tasklet_kill(&hostdata->srp_task);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
- } while ((rc == H_Busy) || (H_isLongBusy(rc)));
+ } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
dma_unmap_single(hostdata->dev,
queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
@@ -230,7 +230,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
rc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
- if (rc == H_Resource)
+ if (rc == H_RESOURCE)
/* maybe kexecing and resource is busy. try a reset */
rc = ibmvscsi_reset_crq_queue(queue,
hostdata);
@@ -269,7 +269,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
req_irq_failed:
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
- } while ((rc == H_Busy) || (H_isLongBusy(rc)));
+ } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
reg_crq_failed:
dma_unmap_single(hostdata->dev,
queue->msg_token,
@@ -295,7 +295,7 @@ int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
/* Re-enable the CRQ */
do {
rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
- } while ((rc == H_InProgress) || (rc == H_Busy) || (H_isLongBusy(rc)));
+ } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
if (rc)
printk(KERN_ERR "ibmvscsi: Error %d enabling adapter\n", rc);
@@ -317,7 +317,7 @@ int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
/* Close the CRQ */
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
- } while ((rc == H_Busy) || (H_isLongBusy(rc)));
+ } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
/* Clean out the queue */
memset(queue->msgs, 0x00, PAGE_SIZE);
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 21b0ed583b8a7..e63c1ff1e1025 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -278,7 +278,7 @@ static void ata_unpack_xfermask(unsigned int xfer_mask,
}
static const struct ata_xfer_ent {
- unsigned int shift, bits;
+ int shift, bits;
u8 base;
} ata_xfer_tbl[] = {
{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
@@ -989,9 +989,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
qc->private_data = &wait;
qc->complete_fn = ata_qc_complete_internal;
- qc->err_mask = ata_qc_issue(qc);
- if (qc->err_mask)
- ata_qc_complete(qc);
+ ata_qc_issue(qc);
spin_unlock_irqrestore(&ap->host_set->lock, flags);
@@ -3997,15 +3995,14 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
- *
- * RETURNS:
- * Zero on success, AC_ERR_* mask on failure
*/
-
-unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
+void ata_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ qc->ap->active_tag = qc->tag;
+ qc->flags |= ATA_QCFLAG_ACTIVE;
+
if (ata_should_dma_map(qc)) {
if (qc->flags & ATA_QCFLAG_SG) {
if (ata_sg_setup(qc))
@@ -4020,17 +4017,18 @@ unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
ap->ops->qc_prep(qc);
- qc->ap->active_tag = qc->tag;
- qc->flags |= ATA_QCFLAG_ACTIVE;
-
- return ap->ops->qc_issue(qc);
+ qc->err_mask |= ap->ops->qc_issue(qc);
+ if (unlikely(qc->err_mask))
+ goto err;
+ return;
sg_err:
qc->flags &= ~ATA_QCFLAG_DMAMAP;
- return AC_ERR_SYSTEM;
+ qc->err_mask |= AC_ERR_SYSTEM;
+err:
+ ata_qc_complete(qc);
}
-
/**
* ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
* @qc: command to issue to device
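The libata change above makes ata_qc_issue() void: it now sets active_tag and ATA_QCFLAG_ACTIVE before calling the port's ->qc_issue() hook and completes the command itself via ata_qc_complete() when an error mask comes back, so callers no longer check a return value. Caller-side, the convention shifts roughly like this:

    /* Old convention (removed by this patch): */
    qc->err_mask = ata_qc_issue(qc);
    if (qc->err_mask)
        ata_qc_complete(qc);

    /* New convention -- failures are completed inside ata_qc_issue(): */
    ata_qc_issue(qc);
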
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 628191bfd990a..53f5b0d9161c0 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -1431,9 +1431,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
goto early_finish;
/* select device, send command to hardware */
- qc->err_mask = ata_qc_issue(qc);
- if (qc->err_mask)
- ata_qc_complete(qc);
+ ata_qc_issue(qc);
VPRINTK("EXIT\n");
return;
@@ -2199,9 +2197,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
qc->complete_fn = atapi_sense_complete;
- qc->err_mask = ata_qc_issue(qc);
- if (qc->err_mask)
- ata_qc_complete(qc);
+ ata_qc_issue(qc);
DPRINTK("EXIT\n");
}
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 65f52beea8846..1c755b14521a9 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -47,7 +47,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
extern void ata_port_flush_task(struct ata_port *ap);
extern void ata_qc_free(struct ata_queued_cmd *qc);
-extern unsigned int ata_qc_issue(struct ata_queued_cmd *qc);
+extern void ata_qc_issue(struct ata_queued_cmd *qc);
extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
extern void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep);
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 5609847e254a8..ee449b29fc82c 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -89,29 +89,29 @@ MODULE_LICENSE("Dual MPL/GPL");
/*====================================================================*/
typedef struct scsi_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct Scsi_Host *host;
} scsi_info_t;
-static void aha152x_release_cs(dev_link_t *link);
+static void aha152x_release_cs(struct pcmcia_device *link);
static void aha152x_detach(struct pcmcia_device *p_dev);
-static void aha152x_config_cs(dev_link_t *link);
+static int aha152x_config_cs(struct pcmcia_device *link);
-static dev_link_t *dev_list;
+static struct pcmcia_device *dev_list;
-static int aha152x_attach(struct pcmcia_device *p_dev)
+static int aha152x_probe(struct pcmcia_device *link)
{
scsi_info_t *info;
- dev_link_t *link;
-
+
DEBUG(0, "aha152x_attach()\n");
/* Create new SCSI device */
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) return -ENOMEM;
memset(info, 0, sizeof(*info));
- link = &info->link; link->priv = info;
+ info->p_dev = link;
+ link->priv = info;
link->io.NumPorts1 = 0x20;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
@@ -119,41 +119,22 @@ static int aha152x_attach(struct pcmcia_device *p_dev)
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- aha152x_config_cs(link);
-
- return 0;
+ return aha152x_config_cs(link);
} /* aha152x_attach */
/*====================================================================*/
-static void aha152x_detach(struct pcmcia_device *p_dev)
+static void aha152x_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
- dev_link_t **linkp;
-
DEBUG(0, "aha152x_detach(0x%p)\n", link);
-
- /* Locate device structure */
- for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
- if (*linkp == link) break;
- if (*linkp == NULL)
- return;
- if (link->state & DEV_CONFIG)
- aha152x_release_cs(link);
+ aha152x_release_cs(link);
/* Unlink device structure, free bits */
- *linkp = link->next;
kfree(link->priv);
-
} /* aha152x_detach */
/*====================================================================*/
@@ -161,9 +142,8 @@ static void aha152x_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void aha152x_config_cs(dev_link_t *link)
+static int aha152x_config_cs(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
scsi_info_t *info = link->priv;
struct aha152x_setup s;
tuple_t tuple;
@@ -178,19 +158,16 @@ static void aha152x_config_cs(dev_link_t *link)
tuple.TupleData = tuple_data;
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
- /* Configure card */
- link->state |= DEV_CONFIG;
-
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
- if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
- pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+ if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
+ pcmcia_parse_tuple(link, &tuple, &parse) != 0)
goto next_entry;
/* For New Media T&J, look for a SCSI window */
if (parse.cftable_entry.io.win[0].len >= 0x20)
@@ -201,15 +178,15 @@ static void aha152x_config_cs(dev_link_t *link)
if ((parse.cftable_entry.io.nwin > 0) &&
(link->io.BasePort1 < 0xffff)) {
link->conf.ConfigIndex = parse.cftable_entry.index;
- i = pcmcia_request_io(handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) break;
}
next_entry:
- CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
}
- CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
/* Set configuration options for the aha152x driver */
memset(&s, 0, sizeof(s));
@@ -231,53 +208,30 @@ static void aha152x_config_cs(dev_link_t *link)
}
sprintf(info->node.dev_name, "scsi%d", host->host_no);
- link->dev = &info->node;
+ link->dev_node = &info->node;
info->host = host;
- link->state &= ~DEV_CONFIG_PENDING;
- return;
-
+ return 0;
+
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
aha152x_release_cs(link);
- return;
+ return -ENODEV;
}
-static void aha152x_release_cs(dev_link_t *link)
+static void aha152x_release_cs(struct pcmcia_device *link)
{
scsi_info_t *info = link->priv;
aha152x_release(info->host);
- link->dev = NULL;
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
}
-static int aha152x_suspend(struct pcmcia_device *dev)
+static int aha152x_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
-}
-
-static int aha152x_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
scsi_info_t *info = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- aha152x_host_reset_host(info->host);
- }
+ aha152x_host_reset_host(info->host);
return 0;
}
@@ -297,10 +251,9 @@ static struct pcmcia_driver aha152x_cs_driver = {
.drv = {
.name = "aha152x_cs",
},
- .probe = aha152x_attach,
+ .probe = aha152x_probe,
.remove = aha152x_detach,
.id_table = aha152x_ids,
- .suspend = aha152x_suspend,
.resume = aha152x_resume,
};
@@ -317,4 +270,3 @@ static void __exit exit_aha152x_cs(void)
module_init(init_aha152x_cs);
module_exit(exit_aha152x_cs);
-
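This aha152x_cs rework is the template for the other PCMCIA stubs in this series: the embedded dev_link_t disappears in favour of the struct pcmcia_device passed in by the core, probe and config return an int, release collapses to pcmcia_disable_device(), and the suspend handler goes away along with the hand-rolled DEV_CONFIG/DEV_SUSPEND state tracking. A hedged skeleton of the resulting driver shape, using made-up "foo" names:

    /* Skeleton only; the foo_* names are hypothetical. */
    typedef struct foo_info_t {
        struct pcmcia_device *p_dev;
        dev_node_t           node;
    } foo_info_t;

    static int foo_config(struct pcmcia_device *link);   /* CS_CHECK-style config, as above */

    static int foo_probe(struct pcmcia_device *link)
    {
        foo_info_t *info = kzalloc(sizeof(*info), GFP_KERNEL);

        if (!info)
            return -ENOMEM;
        info->p_dev = link;
        link->priv = info;
        /* fill in link->io, link->irq and link->conf here */
        return foo_config(link);          /* 0 on success, -ENODEV on failure */
    }

    static void foo_release(struct pcmcia_device *link)
    {
        pcmcia_disable_device(link);      /* replaces release_configuration/io/irq */
    }

    static void foo_detach(struct pcmcia_device *link)
    {
        foo_release(link);
        kfree(link->priv);
    }
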
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 788c58d805f39..85f7ffac19a0e 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -73,57 +73,48 @@ static char *version =
/*====================================================================*/
typedef struct scsi_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct Scsi_Host *host;
} scsi_info_t;
-static void fdomain_release(dev_link_t *link);
+static void fdomain_release(struct pcmcia_device *link);
static void fdomain_detach(struct pcmcia_device *p_dev);
-static void fdomain_config(dev_link_t *link);
+static int fdomain_config(struct pcmcia_device *link);
-static int fdomain_attach(struct pcmcia_device *p_dev)
+static int fdomain_probe(struct pcmcia_device *link)
{
- scsi_info_t *info;
- dev_link_t *link;
-
- DEBUG(0, "fdomain_attach()\n");
-
- /* Create new SCSI device */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) return -ENOMEM;
- memset(info, 0, sizeof(*info));
- link = &info->link; link->priv = info;
- link->io.NumPorts1 = 0x10;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 10;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.IRQInfo1 = IRQ_LEVEL_ID;
- link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
- link->conf.IntType = INT_MEMORY_AND_IO;
- link->conf.Present = PRESENT_OPTION;
-
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- fdomain_config(link);
-
- return 0;
+ scsi_info_t *info;
+
+ DEBUG(0, "fdomain_attach()\n");
+
+ /* Create new SCSI device */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->p_dev = link;
+ link->priv = info;
+ link->io.NumPorts1 = 0x10;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->io.IOAddrLines = 10;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.Present = PRESENT_OPTION;
+
+ return fdomain_config(link);
} /* fdomain_attach */
/*====================================================================*/
-static void fdomain_detach(struct pcmcia_device *p_dev)
+static void fdomain_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
DEBUG(0, "fdomain_detach(0x%p)\n", link);
- if (link->state & DEV_CONFIG)
- fdomain_release(link);
+ fdomain_release(link);
kfree(link->priv);
} /* fdomain_detach */
@@ -133,9 +124,8 @@ static void fdomain_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void fdomain_config(dev_link_t *link)
+static int fdomain_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
scsi_info_t *info = link->priv;
tuple_t tuple;
cisparse_t parse;
@@ -150,103 +140,75 @@ static void fdomain_config(dev_link_t *link)
tuple.TupleData = tuple_data;
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
- /* Configure card */
- link->state |= DEV_CONFIG;
-
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
- if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
- pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+ if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
+ pcmcia_parse_tuple(link, &tuple, &parse) != 0)
goto next_entry;
link->conf.ConfigIndex = parse.cftable_entry.index;
link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
- i = pcmcia_request_io(handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS) break;
next_entry:
- CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
}
- CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
-
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
+
/* A bad hack... */
release_region(link->io.BasePort1, link->io.NumPorts1);
/* Set configuration options for the fdomain driver */
sprintf(str, "%d,%d", link->io.BasePort1, link->irq.AssignedIRQ);
fdomain_setup(str);
-
+
host = __fdomain_16x0_detect(&fdomain_driver_template);
if (!host) {
printk(KERN_INFO "fdomain_cs: no SCSI devices found\n");
goto cs_failed;
}
-
- scsi_add_host(host, NULL); /* XXX handle failure */
+
+ if (scsi_add_host(host, NULL))
+ goto cs_failed;
scsi_scan_host(host);
sprintf(info->node.dev_name, "scsi%d", host->host_no);
- link->dev = &info->node;
+ link->dev_node = &info->node;
info->host = host;
-
- link->state &= ~DEV_CONFIG_PENDING;
- return;
-
+
+ return 0;
+
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
fdomain_release(link);
- return;
-
+ return -ENODEV;
} /* fdomain_config */
/*====================================================================*/
-static void fdomain_release(dev_link_t *link)
+static void fdomain_release(struct pcmcia_device *link)
{
- scsi_info_t *info = link->priv;
+ scsi_info_t *info = link->priv;
- DEBUG(0, "fdomain_release(0x%p)\n", link);
+ DEBUG(0, "fdomain_release(0x%p)\n", link);
- scsi_remove_host(info->host);
- link->dev = NULL;
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- scsi_unregister(info->host);
-
- link->state &= ~DEV_CONFIG;
+ scsi_remove_host(info->host);
+ pcmcia_disable_device(link);
+ scsi_unregister(info->host);
}
/*====================================================================*/
-static int fdomain_suspend(struct pcmcia_device *dev)
+static int fdomain_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
-}
-
-static int fdomain_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
- fdomain_16x0_bus_reset(NULL);
- }
+ fdomain_16x0_bus_reset(NULL);
return 0;
}
@@ -264,10 +226,9 @@ static struct pcmcia_driver fdomain_cs_driver = {
.drv = {
.name = "fdomain_cs",
},
- .probe = fdomain_attach,
+ .probe = fdomain_probe,
.remove = fdomain_detach,
.id_table = fdomain_ids,
- .suspend = fdomain_suspend,
.resume = fdomain_resume,
};
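The fdomain conversion repeats the same handle-to-link substitution; the piece shared by all of these drivers is the CIS walk, where CS_CHECK() bails out to cs_failed on any Card Services error and CFTABLE_ENTRY tuples are tried until pcmcia_request_io() accepts one. Roughly, using the locals from the config functions above:

    /* Sketch of the shared tuple-walking idiom (tuple, parse, link as in the code above). */
    tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
    while (1) {
        if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
            pcmcia_parse_tuple(link, &tuple, &parse) != 0)
            goto next_entry;
        link->conf.ConfigIndex = parse.cftable_entry.index;
        link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
        if (pcmcia_request_io(link, &link->io) == CS_SUCCESS)
            break;                        /* found a usable I/O window */
    next_entry:
        CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
    }
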
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 9e3ab3fd53555..231f9c311c697 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1593,11 +1593,11 @@ static int nsp_eh_host_reset(Scsi_Cmnd *SCpnt)
configure the card at this point -- we wait until we receive a
card insertion event.
======================================================================*/
-static int nsp_cs_attach(struct pcmcia_device *p_dev)
+static int nsp_cs_probe(struct pcmcia_device *link)
{
scsi_info_t *info;
- dev_link_t *link;
nsp_hw_data *data = &nsp_data_base;
+ int ret;
nsp_dbg(NSP_DEBUG_INIT, "in");
@@ -1605,7 +1605,7 @@ static int nsp_cs_attach(struct pcmcia_device *p_dev)
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) { return -ENOMEM; }
memset(info, 0, sizeof(*info));
- link = &info->link;
+ info->p_dev = link;
link->priv = info;
data->ScsiInfo = info;
@@ -1627,18 +1627,13 @@ static int nsp_cs_attach(struct pcmcia_device *p_dev)
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- nsp_cs_config(link);
+ ret = nsp_cs_config(link);
nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link);
- return 0;
+ return ret;
} /* nsp_cs_attach */
@@ -1648,16 +1643,12 @@ static int nsp_cs_attach(struct pcmcia_device *p_dev)
structures are freed. Otherwise, the structures will be freed
when the device is released.
======================================================================*/
-static void nsp_cs_detach(struct pcmcia_device *p_dev)
+static void nsp_cs_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
nsp_dbg(NSP_DEBUG_INIT, "in, link=0x%p", link);
- if (link->state & DEV_CONFIG) {
- ((scsi_info_t *)link->priv)->stop = 1;
- nsp_cs_release(link);
- }
+ ((scsi_info_t *)link->priv)->stop = 1;
+ nsp_cs_release(link);
kfree(link->priv);
link->priv = NULL;
@@ -1672,9 +1663,9 @@ static void nsp_cs_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
/*====================================================================*/
-static void nsp_cs_config(dev_link_t *link)
+static int nsp_cs_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
+ int ret;
scsi_info_t *info = link->priv;
tuple_t tuple;
cisparse_t parse;
@@ -1698,26 +1689,22 @@ static void nsp_cs_config(dev_link_t *link)
tuple.TupleData = tuple_data;
tuple.TupleDataMax = sizeof(tuple_data);
tuple.TupleOffset = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
-
/* Look up the current Vcc */
- CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
- link->conf.Vcc = conf.Vcc;
+ CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf));
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
- if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
- pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+ if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
+ pcmcia_parse_tuple(link, &tuple, &parse) != 0)
goto next_entry;
if (cfg->flags & CISTPL_CFTABLE_DEFAULT) { dflt = *cfg; }
@@ -1743,10 +1730,10 @@ static void nsp_cs_config(dev_link_t *link)
}
if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) {
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
} else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM)) {
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
}
@@ -1773,7 +1760,7 @@ static void nsp_cs_config(dev_link_t *link)
link->io.NumPorts2 = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(link->handle, &link->io) != 0)
+ if (pcmcia_request_io(link, &link->io) != 0)
goto next_entry;
}
@@ -1788,7 +1775,7 @@ static void nsp_cs_config(dev_link_t *link)
req.Size = 0x1000;
}
req.AccessSpeed = 0;
- if (pcmcia_request_window(&link->handle, &req, &link->win) != 0)
+ if (pcmcia_request_window(&link, &req, &link->win) != 0)
goto next_entry;
map.Page = 0; map.CardOffset = mem->win[0].card_addr;
if (pcmcia_map_mem_page(link->win, &map) != 0)
@@ -1802,17 +1789,14 @@ static void nsp_cs_config(dev_link_t *link)
next_entry:
nsp_dbg(NSP_DEBUG_INIT, "next");
-
- if (link->io.NumPorts1) {
- pcmcia_release_io(link->handle, &link->io);
- }
- CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ pcmcia_disable_device(link);
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
}
if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
}
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
if (free_ports) {
if (link->io.BasePort1) {
@@ -1854,16 +1838,19 @@ static void nsp_cs_config(dev_link_t *link)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,74))
- scsi_add_host (host, NULL);
+ ret = scsi_add_host (host, NULL);
+ if (ret)
+ goto cs_failed;
+
scsi_scan_host(host);
snprintf(info->node.dev_name, sizeof(info->node.dev_name), "scsi%d", host->host_no);
- link->dev = &info->node;
+ link->dev_node = &info->node;
info->host = host;
#else
nsp_dbg(NSP_DEBUG_INIT, "GET_SCSI_INFO");
- tail = &link->dev;
+ tail = &link->dev_node;
info->ndev = 0;
nsp_dbg(NSP_DEBUG_INIT, "host=0x%p", host);
@@ -1908,11 +1895,10 @@ static void nsp_cs_config(dev_link_t *link)
#endif
/* Finally, report what we've done */
- printk(KERN_INFO "nsp_cs: index 0x%02x: Vcc %d.%d",
- link->conf.ConfigIndex,
- link->conf.Vcc/10, link->conf.Vcc%10);
- if (link->conf.Vpp1) {
- printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
+ printk(KERN_INFO "nsp_cs: index 0x%02x: ",
+ link->conf.ConfigIndex);
+ if (link->conf.Vpp) {
+ printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
}
if (link->conf.Attributes & CONF_ENABLE_IRQ) {
printk(", irq %d", link->irq.AssignedIRQ);
@@ -1929,15 +1915,14 @@ static void nsp_cs_config(dev_link_t *link)
req.Base+req.Size-1);
printk("\n");
- link->state &= ~DEV_CONFIG_PENDING;
- return;
+ return 0;
cs_failed:
nsp_dbg(NSP_DEBUG_INIT, "config fail");
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
nsp_cs_release(link);
- return;
+ return -ENODEV;
} /* nsp_cs_config */
#undef CS_CHECK
@@ -1947,7 +1932,7 @@ static void nsp_cs_config(dev_link_t *link)
device, and release the PCMCIA configuration. If the device is
still open, this will be postponed until it is closed.
======================================================================*/
-static void nsp_cs_release(dev_link_t *link)
+static void nsp_cs_release(struct pcmcia_device *link)
{
scsi_info_t *info = link->priv;
nsp_hw_data *data = NULL;
@@ -1968,22 +1953,15 @@ static void nsp_cs_release(dev_link_t *link)
#else
scsi_unregister_host(&nsp_driver_template);
#endif
- link->dev = NULL;
+ link->dev_node = NULL;
if (link->win) {
if (data != NULL) {
iounmap((void *)(data->MmioAddress));
}
- pcmcia_release_window(link->win);
- }
- pcmcia_release_configuration(link->handle);
- if (link->io.NumPorts1) {
- pcmcia_release_io(link->handle, &link->io);
}
- if (link->irq.AssignedIRQ) {
- pcmcia_release_irq(link->handle, &link->irq);
- }
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,2))
if (info->host != NULL) {
scsi_host_put(info->host);
@@ -1991,14 +1969,11 @@ static void nsp_cs_release(dev_link_t *link)
#endif
} /* nsp_cs_release */
-static int nsp_cs_suspend(struct pcmcia_device *dev)
+static int nsp_cs_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
scsi_info_t *info = link->priv;
nsp_hw_data *data;
- link->state |= DEV_SUSPEND;
-
nsp_dbg(NSP_DEBUG_INIT, "event: suspend");
if (info->host != NULL) {
@@ -2011,25 +1986,16 @@ static int nsp_cs_suspend(struct pcmcia_device *dev)
info->stop = 1;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
return 0;
}
-static int nsp_cs_resume(struct pcmcia_device *dev)
+static int nsp_cs_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
scsi_info_t *info = link->priv;
nsp_hw_data *data;
nsp_dbg(NSP_DEBUG_INIT, "event: resume");
- link->state &= ~DEV_SUSPEND;
-
- if (link->state & DEV_CONFIG)
- pcmcia_request_configuration(link->handle, &link->conf);
-
info->stop = 0;
if (info->host != NULL) {
@@ -2065,7 +2031,7 @@ static struct pcmcia_driver nsp_driver = {
.drv = {
.name = "nsp_cs",
},
- .probe = nsp_cs_attach,
+ .probe = nsp_cs_probe,
.remove = nsp_cs_detach,
.id_table = nsp_cs_ids,
.suspend = nsp_cs_suspend,
@@ -2098,19 +2064,7 @@ static int __init nsp_cs_init(void)
static void __exit nsp_cs_exit(void)
{
nsp_msg(KERN_INFO, "unloading...");
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,68))
pcmcia_unregister_driver(&nsp_driver);
-#else
- unregister_pcmcia_driver(&dev_info);
- /* XXX: this really needs to move into generic code.. */
- while (dev_list != NULL) {
- if (dev_list->state & DEV_CONFIG) {
- nsp_cs_release(dev_list);
- }
- nsp_cs_detach(dev_list);
- }
-#endif
}
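As with the other stubs, the nsp_cs power-management hooks keep only device-specific work: the DEV_SUSPEND/DEV_CONFIG bookkeeping and the explicit release/request of the socket configuration move out of the driver, so suspend/resume shrink to quiescing and restarting the driver's own activity. The reduced contract looks roughly like this, with a hypothetical info structure:

    /* Hedged sketch of the slimmed-down suspend/resume pair. */
    static int example_suspend(struct pcmcia_device *link)
    {
        struct example_info *info = link->priv;

        info->stop = 1;    /* quiesce driver activity; the core handles the socket */
        return 0;
    }

    static int example_resume(struct pcmcia_device *link)
    {
        struct example_info *info = link->priv;

        info->stop = 0;
        /* device-specific re-initialisation (bus reset, wakeup, ...) goes here */
        return 0;
    }
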
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index b66b140a745e4..8908b8e5b78a9 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -225,7 +225,7 @@
/*====================================================================*/
typedef struct scsi_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
struct Scsi_Host *host;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,74))
dev_node_t node;
@@ -297,8 +297,8 @@ typedef struct _nsp_hw_data {
/* Card service functions */
static void nsp_cs_detach (struct pcmcia_device *p_dev);
-static void nsp_cs_release(dev_link_t *link);
-static void nsp_cs_config (dev_link_t *link);
+static void nsp_cs_release(struct pcmcia_device *link);
+static int nsp_cs_config (struct pcmcia_device *link);
/* Linux SCSI subsystem specific functions */
static struct Scsi_Host *nsp_detect (struct scsi_host_template *sht);
@@ -450,7 +450,7 @@ static inline struct Scsi_Host *scsi_host_hn_get(unsigned short hostno)
return host;
}
-static void cs_error(client_handle_t handle, int func, int ret)
+static void cs_error(struct pcmcia_device *handle, int func, int ret)
{
error_info_t err = { func, ret };
pcmcia_report_error(handle, &err);
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index dce7e687fd4a2..86c2ac6ae6239 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -91,18 +91,18 @@ static struct scsi_host_template qlogicfas_driver_template = {
/*====================================================================*/
typedef struct scsi_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct Scsi_Host *host;
unsigned short manf_id;
} scsi_info_t;
-static void qlogic_release(dev_link_t *link);
+static void qlogic_release(struct pcmcia_device *link);
static void qlogic_detach(struct pcmcia_device *p_dev);
-static void qlogic_config(dev_link_t * link);
+static int qlogic_config(struct pcmcia_device * link);
static struct Scsi_Host *qlogic_detect(struct scsi_host_template *host,
- dev_link_t *link, int qbase, int qlirq)
+ struct pcmcia_device *link, int qbase, int qlirq)
{
int qltyp; /* type of chip */
int qinitid;
@@ -156,10 +156,9 @@ free_scsi_host:
err:
return NULL;
}
-static int qlogic_attach(struct pcmcia_device *p_dev)
+static int qlogic_probe(struct pcmcia_device *link)
{
scsi_info_t *info;
- dev_link_t *link;
DEBUG(0, "qlogic_attach()\n");
@@ -168,7 +167,7 @@ static int qlogic_attach(struct pcmcia_device *p_dev)
if (!info)
return -ENOMEM;
memset(info, 0, sizeof(*info));
- link = &info->link;
+ info->p_dev = link;
link->priv = info;
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
@@ -176,30 +175,19 @@ static int qlogic_attach(struct pcmcia_device *p_dev)
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- qlogic_config(link);
-
- return 0;
+ return qlogic_config(link);
} /* qlogic_attach */
/*====================================================================*/
-static void qlogic_detach(struct pcmcia_device *p_dev)
+static void qlogic_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
DEBUG(0, "qlogic_detach(0x%p)\n", link);
- if (link->state & DEV_CONFIG)
- qlogic_release(link);
-
+ qlogic_release(link);
kfree(link->priv);
} /* qlogic_detach */
@@ -209,9 +197,8 @@ static void qlogic_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void qlogic_config(dev_link_t * link)
+static int qlogic_config(struct pcmcia_device * link)
{
- client_handle_t handle = link->handle;
scsi_info_t *info = link->priv;
tuple_t tuple;
cisparse_t parse;
@@ -225,38 +212,35 @@ static void qlogic_config(dev_link_t * link)
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
tuple.DesiredTuple = CISTPL_MANFID;
- if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) && (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS))
+ if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) && (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS))
info->manf_id = le16_to_cpu(tuple.TupleData[0]);
- /* Configure card */
- link->state |= DEV_CONFIG;
-
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
- if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
- pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+ if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
+ pcmcia_parse_tuple(link, &tuple, &parse) != 0)
goto next_entry;
link->conf.ConfigIndex = parse.cftable_entry.index;
link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
if (link->io.BasePort1 != 0) {
- i = pcmcia_request_io(handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS)
break;
}
next_entry:
- CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
}
- CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) {
/* set ATAcmd */
@@ -275,82 +259,54 @@ static void qlogic_config(dev_link_t * link)
if (!host) {
printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name);
- goto out;
+ goto cs_failed;
}
sprintf(info->node.dev_name, "scsi%d", host->host_no);
- link->dev = &info->node;
+ link->dev_node = &info->node;
info->host = host;
-out:
- link->state &= ~DEV_CONFIG_PENDING;
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
- link->dev = NULL;
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
- return;
+ cs_error(link, last_fn, last_ret);
+ pcmcia_disable_device(link);
+ return -ENODEV;
} /* qlogic_config */
/*====================================================================*/
-static void qlogic_release(dev_link_t *link)
+static void qlogic_release(struct pcmcia_device *link)
{
scsi_info_t *info = link->priv;
DEBUG(0, "qlogic_release(0x%p)\n", link);
scsi_remove_host(info->host);
- link->dev = NULL;
free_irq(link->irq.AssignedIRQ, info->host);
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
+ pcmcia_disable_device(link);
scsi_host_put(info->host);
-
- link->state &= ~DEV_CONFIG;
}
/*====================================================================*/
-static int qlogic_suspend(struct pcmcia_device *dev)
+static int qlogic_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
-}
+ scsi_info_t *info = link->priv;
-static int qlogic_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- scsi_info_t *info = link->priv;
-
- pcmcia_request_configuration(link->handle, &link->conf);
- if ((info->manf_id == MANFID_MACNICA) ||
- (info->manf_id == MANFID_PIONEER) ||
- (info->manf_id == 0x0098)) {
- outb(0x80, link->io.BasePort1 + 0xd);
- outb(0x24, link->io.BasePort1 + 0x9);
- outb(0x04, link->io.BasePort1 + 0xd);
- }
- /* Ugggglllyyyy!!! */
- qlogicfas408_bus_reset(NULL);
+ pcmcia_request_configuration(link, &link->conf);
+ if ((info->manf_id == MANFID_MACNICA) ||
+ (info->manf_id == MANFID_PIONEER) ||
+ (info->manf_id == 0x0098)) {
+ outb(0x80, link->io.BasePort1 + 0xd);
+ outb(0x24, link->io.BasePort1 + 0x9);
+ outb(0x04, link->io.BasePort1 + 0xd);
}
+ /* Ugggglllyyyy!!! */
+ qlogicfas408_bus_reset(NULL);
return 0;
}
@@ -382,10 +338,9 @@ static struct pcmcia_driver qlogic_cs_driver = {
.drv = {
.name = "qlogic_cs",
},
- .probe = qlogic_attach,
+ .probe = qlogic_probe,
.remove = qlogic_detach,
.id_table = qlogic_ids,
- .suspend = qlogic_suspend,
.resume = qlogic_resume,
};
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 3a4dd6f5b81fb..9f59827707f0c 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -202,7 +202,7 @@ static char *version =
/* ================================================================== */
struct scsi_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
struct Scsi_Host *host;
unsigned short manf_id;
@@ -527,7 +527,7 @@ idle_out:
}
static void
-SYM53C500_release(dev_link_t *link)
+SYM53C500_release(struct pcmcia_device *link)
{
struct scsi_info_t *info = link->priv;
struct Scsi_Host *shost = info->host;
@@ -550,13 +550,7 @@ SYM53C500_release(dev_link_t *link)
if (shost->io_port && shost->n_io_port)
release_region(shost->io_port, shost->n_io_port);
- link->dev = NULL;
-
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
-
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
scsi_host_put(shost);
} /* SYM53C500_release */
@@ -713,10 +707,9 @@ static struct scsi_host_template sym53c500_driver_template = {
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void
-SYM53C500_config(dev_link_t *link)
+static int
+SYM53C500_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
struct scsi_info_t *info = link->priv;
tuple_t tuple;
cisparse_t parse;
@@ -733,40 +726,37 @@ SYM53C500_config(dev_link_t *link)
tuple.TupleDataMax = 64;
tuple.TupleOffset = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
tuple.DesiredTuple = CISTPL_MANFID;
- if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) &&
- (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS))
+ if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
+ (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS))
info->manf_id = le16_to_cpu(tuple.TupleData[0]);
- /* Configure card */
- link->state |= DEV_CONFIG;
-
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
- if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
- pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+ if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
+ pcmcia_parse_tuple(link, &tuple, &parse) != 0)
goto next_entry;
link->conf.ConfigIndex = parse.cftable_entry.index;
link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
if (link->io.BasePort1 != 0) {
- i = pcmcia_request_io(handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS)
break;
}
next_entry:
- CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
}
- CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
/*
* That's the trouble with copying liberally from another driver.
@@ -835,7 +825,7 @@ next_entry:
data->fast_pio = USE_FAST_PIO;
sprintf(info->node.dev_name, "scsi%d", host->host_no);
- link->dev = &info->node;
+ link->dev_node = &info->node;
info->host = host;
if (scsi_add_host(host, NULL))
@@ -843,7 +833,7 @@ next_entry:
scsi_scan_host(host);
- goto out; /* SUCCESS */
+ return 0;
err_free_irq:
free_irq(irq_level, host);
@@ -852,74 +842,50 @@ err_free_scsi:
err_release:
release_region(port_base, 0x10);
printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n");
-
-out:
- link->state &= ~DEV_CONFIG_PENDING;
- return;
+ return -ENODEV;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
SYM53C500_release(link);
- return;
+ return -ENODEV;
} /* SYM53C500_config */
-static int sym53c500_suspend(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
-}
-
-static int sym53c500_resume(struct pcmcia_device *dev)
+static int sym53c500_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
struct scsi_info_t *info = link->priv;
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG) {
- pcmcia_request_configuration(link->handle, &link->conf);
-
- /* See earlier comment about manufacturer IDs. */
- if ((info->manf_id == MANFID_MACNICA) ||
- (info->manf_id == MANFID_PIONEER) ||
- (info->manf_id == 0x0098)) {
- outb(0x80, link->io.BasePort1 + 0xd);
- outb(0x24, link->io.BasePort1 + 0x9);
- outb(0x04, link->io.BasePort1 + 0xd);
- }
- /*
- * If things don't work after a "resume",
- * this is a good place to start looking.
- */
- SYM53C500_int_host_reset(link->io.BasePort1);
+ /* See earlier comment about manufacturer IDs. */
+ if ((info->manf_id == MANFID_MACNICA) ||
+ (info->manf_id == MANFID_PIONEER) ||
+ (info->manf_id == 0x0098)) {
+ outb(0x80, link->io.BasePort1 + 0xd);
+ outb(0x24, link->io.BasePort1 + 0x9);
+ outb(0x04, link->io.BasePort1 + 0xd);
}
+ /*
+ * If things don't work after a "resume",
+ * this is a good place to start looking.
+ */
+ SYM53C500_int_host_reset(link->io.BasePort1);
return 0;
}
static void
-SYM53C500_detach(struct pcmcia_device *p_dev)
+SYM53C500_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
DEBUG(0, "SYM53C500_detach(0x%p)\n", link);
- if (link->state & DEV_CONFIG)
- SYM53C500_release(link);
+ SYM53C500_release(link);
kfree(link->priv);
link->priv = NULL;
} /* SYM53C500_detach */
static int
-SYM53C500_attach(struct pcmcia_device *p_dev)
+SYM53C500_probe(struct pcmcia_device *link)
{
struct scsi_info_t *info;
- dev_link_t *link;
DEBUG(0, "SYM53C500_attach()\n");
@@ -928,7 +894,7 @@ SYM53C500_attach(struct pcmcia_device *p_dev)
if (!info)
return -ENOMEM;
memset(info, 0, sizeof(*info));
- link = &info->link;
+ info->p_dev = link;
link->priv = info;
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
@@ -936,17 +902,10 @@ SYM53C500_attach(struct pcmcia_device *p_dev)
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- SYM53C500_config(link);
-
- return 0;
+ return SYM53C500_config(link);
} /* SYM53C500_attach */
MODULE_AUTHOR("Bob Tracy <rct@frus.com>");
@@ -966,10 +925,9 @@ static struct pcmcia_driver sym53c500_cs_driver = {
.drv = {
.name = "sym53c500_cs",
},
- .probe = SYM53C500_attach,
+ .probe = SYM53C500_probe,
.remove = SYM53C500_detach,
.id_table = sym53c500_ids,
- .suspend = sym53c500_suspend,
.resume = sym53c500_resume,
};
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index fe0d8b8e91c87..7d22dc0478d3f 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -63,6 +63,33 @@ config SERIAL_8250_CONSOLE
If unsure, say N.
+config SERIAL_8250_GSC
+ tristate
+ depends on SERIAL_8250 && GSC
+ default SERIAL_8250
+
+config SERIAL_8250_PCI
+ tristate "8250/16550 PCI device support" if EMBEDDED
+ depends on SERIAL_8250 && PCI
+ default SERIAL_8250
+ help
+ This builds standard PCI serial support. You may be able to
+ disable this feature if you only need legacy serial support.
+ Saves about 9K.
+
+config SERIAL_8250_PNP
+ tristate "8250/16550 PNP device support" if EMBEDDED
+ depends on SERIAL_8250 && PNP
+ default SERIAL_8250
+ help
+ This builds standard PNP serial support. You may be able to
+ disable this feature if you only need legacy serial support.
+
+config SERIAL_8250_HP300
+ tristate
+ depends on SERIAL_8250 && HP300
+ default SERIAL_8250
+
config SERIAL_8250_CS
tristate "8250/16550 PCMCIA device support"
depends on PCMCIA && SERIAL_8250
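Because the new SERIAL_8250_PCI/PNP/GSC/HP300 symbols default to the value of SERIAL_8250, existing configurations keep building these pieces exactly as before; the "if EMBEDDED" prompts merely let space-constrained builds switch the PCI/PNP parts off. For instance, a configuration with the 8250 core as a module would, by default, end up with something like this illustrative fragment:

    # Illustrative .config fragment; values follow from the defaults above
    CONFIG_SERIAL_8250=m
    CONFIG_SERIAL_8250_PCI=m
    CONFIG_SERIAL_8250_PNP=m
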
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index d2b4c214876b8..0a71bf68a03ff 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -4,15 +4,13 @@
# $Id: Makefile,v 1.8 2002/07/21 21:32:30 rmk Exp $
#
-serial-8250-y :=
-serial-8250-$(CONFIG_PNP) += 8250_pnp.o
-serial-8250-$(CONFIG_GSC) += 8250_gsc.o
-serial-8250-$(CONFIG_PCI) += 8250_pci.o
-serial-8250-$(CONFIG_HP300) += 8250_hp300.o
-
obj-$(CONFIG_SERIAL_CORE) += serial_core.o
obj-$(CONFIG_SERIAL_21285) += 21285.o
-obj-$(CONFIG_SERIAL_8250) += 8250.o $(serial-8250-y)
+obj-$(CONFIG_SERIAL_8250) += 8250.o
+obj-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
+obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
+obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
+obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
diff --git a/drivers/serial/jsm/jsm.h b/drivers/serial/jsm/jsm.h
index dfc1e86d3aa11..043f50b1d10c7 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/serial/jsm/jsm.h
@@ -20,7 +20,7 @@
*
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
- * Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com>
+ * Wendy Xiong <wendyx@us.ibm.com>
*
***********************************************************************/
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index b1b66e71d2811..b3e1f71be4da0 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -20,7 +20,7 @@
*
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
- * Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com>
+ * Wendy Xiong <wendyx@us.ibm.com>
*
*
***********************************************************************/
diff --git a/drivers/serial/jsm/jsm_neo.c b/drivers/serial/jsm/jsm_neo.c
index 87e4e2cf8ce75..a5fc589d6ef5d 100644
--- a/drivers/serial/jsm/jsm_neo.c
+++ b/drivers/serial/jsm/jsm_neo.c
@@ -20,7 +20,7 @@
*
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
- * Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com>
+ * Wendy Xiong <wendyx@us.ibm.com>
*
***********************************************************************/
#include <linux/delay.h> /* For udelay */
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 4d48b625cd3d9..7d823705193cf 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -142,12 +142,14 @@ static void jsm_tty_send_xchar(struct uart_port *port, char ch)
{
unsigned long lock_flags;
struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct termios *termios;
spin_lock_irqsave(&port->lock, lock_flags);
- if (ch == port->info->tty->termios->c_cc[VSTART])
+ termios = port->info->tty->termios;
+ if (ch == termios->c_cc[VSTART])
channel->ch_bd->bd_ops->send_start_character(channel);
- if (ch == port->info->tty->termios->c_cc[VSTOP])
+ if (ch == termios->c_cc[VSTOP])
channel->ch_bd->bd_ops->send_stop_character(channel);
spin_unlock_irqrestore(&port->lock, lock_flags);
}
@@ -178,6 +180,7 @@ static int jsm_tty_open(struct uart_port *port)
struct jsm_board *brd;
int rc = 0;
struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct termios *termios;
/* Get board pointer from our array of majors we have allocated */
brd = channel->ch_bd;
@@ -239,12 +242,13 @@ static int jsm_tty_open(struct uart_port *port)
channel->ch_cached_lsr = 0;
channel->ch_stops_sent = 0;
- channel->ch_c_cflag = port->info->tty->termios->c_cflag;
- channel->ch_c_iflag = port->info->tty->termios->c_iflag;
- channel->ch_c_oflag = port->info->tty->termios->c_oflag;
- channel->ch_c_lflag = port->info->tty->termios->c_lflag;
- channel->ch_startc = port->info->tty->termios->c_cc[VSTART];
- channel->ch_stopc = port->info->tty->termios->c_cc[VSTOP];
+ termios = port->info->tty->termios;
+ channel->ch_c_cflag = termios->c_cflag;
+ channel->ch_c_iflag = termios->c_iflag;
+ channel->ch_c_oflag = termios->c_oflag;
+ channel->ch_c_lflag = termios->c_lflag;
+ channel->ch_startc = termios->c_cc[VSTART];
+ channel->ch_stopc = termios->c_cc[VSTOP];
/* Tell UART to init itself */
brd->bd_ops->uart_init(channel);
@@ -784,6 +788,7 @@ static void jsm_carrier(struct jsm_channel *ch)
void jsm_check_queue_flow_control(struct jsm_channel *ch)
{
+ struct board_ops *bd_ops = ch->ch_bd->bd_ops;
int qleft = 0;
/* Store how much space we have left in the queue */
@@ -809,7 +814,7 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
/* HWFLOW */
if (ch->ch_c_cflag & CRTSCTS) {
if(!(ch->ch_flags & CH_RECEIVER_OFF)) {
- ch->ch_bd->bd_ops->disable_receiver(ch);
+ bd_ops->disable_receiver(ch);
ch->ch_flags |= (CH_RECEIVER_OFF);
jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
"Internal queue hit hilevel mark (%d)! Turning off interrupts.\n",
@@ -819,7 +824,7 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
/* SWFLOW */
else if (ch->ch_c_iflag & IXOFF) {
if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
- ch->ch_bd->bd_ops->send_stop_character(ch);
+ bd_ops->send_stop_character(ch);
ch->ch_stops_sent++;
jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
"Sending stop char! Times sent: %x\n", ch->ch_stops_sent);
@@ -846,7 +851,7 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
/* HWFLOW */
if (ch->ch_c_cflag & CRTSCTS) {
if (ch->ch_flags & CH_RECEIVER_OFF) {
- ch->ch_bd->bd_ops->enable_receiver(ch);
+ bd_ops->enable_receiver(ch);
ch->ch_flags &= ~(CH_RECEIVER_OFF);
jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
"Internal queue hit lowlevel mark (%d)! Turning on interrupts.\n",
@@ -856,7 +861,7 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
/* SWFLOW */
else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
ch->ch_stops_sent = 0;
- ch->ch_bd->bd_ops->send_start_character(ch);
+ bd_ops->send_start_character(ch);
jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "Sending start char!\n");
}
}
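The jsm_tty changes are a straightforward cleanup: the long port->info->tty->termios and ch->ch_bd->bd_ops dereference chains are loaded into locals once and reused. The shape of the transformation, sketched on the termios case:

    /* Sketch of the caching done above: dereference the chain once, reuse the local. */
    struct termios *termios = port->info->tty->termios;

    channel->ch_c_cflag = termios->c_cflag;
    channel->ch_startc  = termios->c_cc[VSTART];
    channel->ch_stopc   = termios->c_cc[VSTOP];
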
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index c30333694fdef..2c70773543e05 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -41,6 +41,7 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/serial_core.h>
+#include <linux/delay.h>
#include <linux/major.h>
#include <asm/io.h>
#include <asm/system.h>
@@ -97,11 +98,13 @@ static const struct multi_id multi_id[] = {
#define MULTI_COUNT (sizeof(multi_id)/sizeof(struct multi_id))
struct serial_info {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
int ndev;
int multi;
int slave;
int manfid;
+ int prodid;
+ int c950ctrl;
dev_node_t node[4];
int line[4];
};
@@ -113,9 +116,36 @@ struct serial_cfg_mem {
};
-static void serial_config(dev_link_t * link);
+static int serial_config(struct pcmcia_device * link);
+static void wakeup_card(struct serial_info *info)
+{
+ int ctrl = info->c950ctrl;
+
+ if (info->manfid == MANFID_OXSEMI) {
+ outb(12, ctrl + 1);
+ } else if (info->manfid == MANFID_POSSIO && info->prodid == PRODID_POSSIO_GCC) {
+ /* request_region? The oxsemi branch does not call request_region either... */
+ /* This sequence is needed to properly initialize MC45 attached to OXCF950.
+ * I tried decreasing these msleep()s, but it only worked reliably (survived
+ * 1000 stop/start operations) with these timeouts or longer. */
+ outb(0xA, ctrl + 1);
+ msleep(100);
+ outb(0xE, ctrl + 1);
+ msleep(300);
+ outb(0xC, ctrl + 1);
+ msleep(100);
+ outb(0xE, ctrl + 1);
+ msleep(200);
+ outb(0xF, ctrl + 1);
+ msleep(100);
+ outb(0xE, ctrl + 1);
+ msleep(100);
+ outb(0xC, ctrl + 1);
+ }
+}
+
/*======================================================================
After a card is removed, serial_remove() will unregister
@@ -123,67 +153,45 @@ static void serial_config(dev_link_t * link);
======================================================================*/
-static void serial_remove(dev_link_t *link)
+static void serial_remove(struct pcmcia_device *link)
{
struct serial_info *info = link->priv;
int i;
- link->state &= ~DEV_PRESENT;
-
DEBUG(0, "serial_release(0x%p)\n", link);
/*
* Recheck to see if the device is still configured.
*/
- if (info->link.state & DEV_CONFIG) {
- for (i = 0; i < info->ndev; i++)
- serial8250_unregister_port(info->line[i]);
+ for (i = 0; i < info->ndev; i++)
+ serial8250_unregister_port(info->line[i]);
- info->link.dev = NULL;
+ info->p_dev->dev_node = NULL;
- if (!info->slave) {
- pcmcia_release_configuration(info->link.handle);
- pcmcia_release_io(info->link.handle, &info->link.io);
- pcmcia_release_irq(info->link.handle, &info->link.irq);
- }
-
- info->link.state &= ~DEV_CONFIG;
- }
+ if (!info->slave)
+ pcmcia_disable_device(link);
}
-static int serial_suspend(struct pcmcia_device *dev)
+static int serial_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
- link->state |= DEV_SUSPEND;
-
- if (link->state & DEV_CONFIG) {
- struct serial_info *info = link->priv;
- int i;
-
- for (i = 0; i < info->ndev; i++)
- serial8250_suspend_port(info->line[i]);
+ struct serial_info *info = link->priv;
+ int i;
- if (!info->slave)
- pcmcia_release_configuration(link->handle);
- }
+ for (i = 0; i < info->ndev; i++)
+ serial8250_suspend_port(info->line[i]);
return 0;
}
-static int serial_resume(struct pcmcia_device *dev)
+static int serial_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
- link->state &= ~DEV_SUSPEND;
-
- if (DEV_OK(link)) {
+ if (pcmcia_dev_present(link)) {
struct serial_info *info = link->priv;
int i;
- if (!info->slave)
- pcmcia_request_configuration(link->handle, &link->conf);
-
for (i = 0; i < info->ndev; i++)
serial8250_resume_port(info->line[i]);
+ wakeup_card(info);
}
return 0;
@@ -197,10 +205,9 @@ static int serial_resume(struct pcmcia_device *dev)
======================================================================*/
-static int serial_probe(struct pcmcia_device *p_dev)
+static int serial_probe(struct pcmcia_device *link)
{
struct serial_info *info;
- dev_link_t *link;
DEBUG(0, "serial_attach()\n");
@@ -209,7 +216,7 @@ static int serial_probe(struct pcmcia_device *p_dev)
if (!info)
return -ENOMEM;
memset(info, 0, sizeof (*info));
- link = &info->link;
+ info->p_dev = link;
link->priv = info;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -223,12 +230,7 @@ static int serial_probe(struct pcmcia_device *p_dev)
}
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- serial_config(link);
-
- return 0;
+ return serial_config(link);
}
/*======================================================================
@@ -240,9 +242,8 @@ static int serial_probe(struct pcmcia_device *p_dev)
======================================================================*/
-static void serial_detach(struct pcmcia_device *p_dev)
+static void serial_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct serial_info *info = link->priv;
DEBUG(0, "serial_detach(0x%p)\n", link);
@@ -263,7 +264,7 @@ static void serial_detach(struct pcmcia_device *p_dev)
/*====================================================================*/
-static int setup_serial(client_handle_t handle, struct serial_info * info,
+static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
kio_addr_t iobase, int irq)
{
struct uart_port port;
@@ -298,7 +299,7 @@ static int setup_serial(client_handle_t handle, struct serial_info * info,
/*====================================================================*/
static int
-first_tuple(client_handle_t handle, tuple_t * tuple, cisparse_t * parse)
+first_tuple(struct pcmcia_device *handle, tuple_t * tuple, cisparse_t * parse)
{
int i;
i = pcmcia_get_first_tuple(handle, tuple);
@@ -311,7 +312,7 @@ first_tuple(client_handle_t handle, tuple_t * tuple, cisparse_t * parse)
}
static int
-next_tuple(client_handle_t handle, tuple_t * tuple, cisparse_t * parse)
+next_tuple(struct pcmcia_device *handle, tuple_t * tuple, cisparse_t * parse)
{
int i;
i = pcmcia_get_next_tuple(handle, tuple);
@@ -325,11 +326,10 @@ next_tuple(client_handle_t handle, tuple_t * tuple, cisparse_t * parse)
/*====================================================================*/
-static int simple_config(dev_link_t *link)
+static int simple_config(struct pcmcia_device *link)
{
static const kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
static const int size_table[2] = { 8, 16 };
- client_handle_t handle = link->handle;
struct serial_info *info = link->priv;
struct serial_cfg_mem *cfg_mem;
tuple_t *tuple;
@@ -350,7 +350,7 @@ static int simple_config(dev_link_t *link)
buf = cfg_mem->buf;
/* If the card is already configured, look up the port and irq */
- i = pcmcia_get_configuration_info(handle, &config);
+ i = pcmcia_get_configuration_info(link, &config);
if ((i == CS_SUCCESS) && (config.Attributes & CONF_VALID_CLIENT)) {
kio_addr_t port = 0;
if ((config.BasePort2 != 0) && (config.NumPorts2 == 8)) {
@@ -363,10 +363,9 @@ static int simple_config(dev_link_t *link)
}
if (info->slave) {
kfree(cfg_mem);
- return setup_serial(handle, info, port, config.AssignedIRQ);
+ return setup_serial(link, info, port, config.AssignedIRQ);
}
}
- link->conf.Vcc = config.Vcc;
/* First pass: look for a config entry that looks normal. */
tuple->TupleData = (cisdata_t *) buf;
@@ -377,12 +376,12 @@ static int simple_config(dev_link_t *link)
/* Two tries: without IO aliases, then with aliases */
for (s = 0; s < 2; s++) {
for (try = 0; try < 2; try++) {
- i = first_tuple(handle, tuple, parse);
+ i = first_tuple(link, tuple, parse);
while (i != CS_NO_MORE_ITEMS) {
if (i != CS_SUCCESS)
goto next_entry;
if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
cf->vpp1.param[CISTPL_POWER_VNOM] / 10000;
if ((cf->io.nwin > 0) && (cf->io.win[0].len == size_table[s]) &&
(cf->io.win[0].base != 0)) {
@@ -390,19 +389,19 @@ static int simple_config(dev_link_t *link)
link->io.BasePort1 = cf->io.win[0].base;
link->io.IOAddrLines = (try == 0) ?
16 : cf->io.flags & CISTPL_IO_LINES_MASK;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS)
goto found_port;
}
next_entry:
- i = next_tuple(handle, tuple, parse);
+ i = next_tuple(link, tuple, parse);
}
}
}
/* Second pass: try to find an entry that isn't picky about
its base address, then try to grab any standard serial port
address, and finally try to get any free port. */
- i = first_tuple(handle, tuple, parse);
+ i = first_tuple(link, tuple, parse);
while (i != CS_NO_MORE_ITEMS) {
if ((i == CS_SUCCESS) && (cf->io.nwin > 0) &&
((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) {
@@ -410,50 +409,48 @@ next_entry:
for (j = 0; j < 5; j++) {
link->io.BasePort1 = base[j];
link->io.IOAddrLines = base[j] ? 16 : 3;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
if (i == CS_SUCCESS)
goto found_port;
}
}
- i = next_tuple(handle, tuple, parse);
+ i = next_tuple(link, tuple, parse);
}
found_port:
if (i != CS_SUCCESS) {
printk(KERN_NOTICE
"serial_cs: no usable port range found, giving up\n");
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
kfree(cfg_mem);
return -1;
}
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIRQ, i);
+ cs_error(link, RequestIRQ, i);
link->irq.AssignedIRQ = 0;
}
if (info->multi && (info->manfid == MANFID_3COM))
link->conf.ConfigIndex &= ~(0x08);
- i = pcmcia_request_configuration(link->handle, &link->conf);
+ i = pcmcia_request_configuration(link, &link->conf);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestConfiguration, i);
+ cs_error(link, RequestConfiguration, i);
kfree(cfg_mem);
return -1;
}
kfree(cfg_mem);
- return setup_serial(handle, info, link->io.BasePort1, link->irq.AssignedIRQ);
+ return setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ);
}
-static int multi_config(dev_link_t * link)
+static int multi_config(struct pcmcia_device * link)
{
- client_handle_t handle = link->handle;
struct serial_info *info = link->priv;
struct serial_cfg_mem *cfg_mem;
tuple_t *tuple;
u_char *buf;
cisparse_t *parse;
cistpl_cftable_entry_t *cf;
- config_info_t config;
int i, rc, base2 = 0;
cfg_mem = kmalloc(sizeof(struct serial_cfg_mem), GFP_KERNEL);
@@ -464,14 +461,6 @@ static int multi_config(dev_link_t * link)
cf = &parse->cftable_entry;
buf = cfg_mem->buf;
- i = pcmcia_get_configuration_info(handle, &config);
- if (i != CS_SUCCESS) {
- cs_error(handle, GetConfigurationInfo, i);
- rc = -1;
- goto free_cfg_mem;
- }
- link->conf.Vcc = config.Vcc;
-
tuple->TupleData = (cisdata_t *) buf;
tuple->TupleOffset = 0;
tuple->TupleDataMax = 255;
@@ -480,7 +469,7 @@ static int multi_config(dev_link_t * link)
/* First, look for a generic full-sized window */
link->io.NumPorts1 = info->multi * 8;
- i = first_tuple(handle, tuple, parse);
+ i = first_tuple(link, tuple, parse);
while (i != CS_NO_MORE_ITEMS) {
/* The quad port cards have bad CIS's, so just look for a
window larger than 8 ports and assume it will be right */
@@ -490,19 +479,19 @@ static int multi_config(dev_link_t * link)
link->io.BasePort1 = cf->io.win[0].base;
link->io.IOAddrLines =
cf->io.flags & CISTPL_IO_LINES_MASK;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
base2 = link->io.BasePort1 + 8;
if (i == CS_SUCCESS)
break;
}
- i = next_tuple(handle, tuple, parse);
+ i = next_tuple(link, tuple, parse);
}
/* If that didn't work, look for two windows */
if (i != CS_SUCCESS) {
link->io.NumPorts1 = link->io.NumPorts2 = 8;
info->multi = 2;
- i = first_tuple(handle, tuple, parse);
+ i = first_tuple(link, tuple, parse);
while (i != CS_NO_MORE_ITEMS) {
if ((i == CS_SUCCESS) && (cf->io.nwin == 2)) {
link->conf.ConfigIndex = cf->index;
@@ -510,26 +499,26 @@ static int multi_config(dev_link_t * link)
link->io.BasePort2 = cf->io.win[1].base;
link->io.IOAddrLines =
cf->io.flags & CISTPL_IO_LINES_MASK;
- i = pcmcia_request_io(link->handle, &link->io);
+ i = pcmcia_request_io(link, &link->io);
base2 = link->io.BasePort2;
if (i == CS_SUCCESS)
break;
}
- i = next_tuple(handle, tuple, parse);
+ i = next_tuple(link, tuple, parse);
}
}
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestIO, i);
+ cs_error(link, RequestIO, i);
rc = -1;
goto free_cfg_mem;
}
- i = pcmcia_request_irq(link->handle, &link->irq);
+ i = pcmcia_request_irq(link, &link->irq);
if (i != CS_SUCCESS) {
printk(KERN_NOTICE
"serial_cs: no usable port range found, giving up\n");
- cs_error(link->handle, RequestIRQ, i);
+ cs_error(link, RequestIRQ, i);
link->irq.AssignedIRQ = 0;
}
/* Socket Dual IO: this enables irq's for second port */
@@ -537,35 +526,43 @@ static int multi_config(dev_link_t * link)
link->conf.Present |= PRESENT_EXT_STATUS;
link->conf.ExtStatus = ESR_REQ_ATTN_ENA;
}
- i = pcmcia_request_configuration(link->handle, &link->conf);
+ i = pcmcia_request_configuration(link, &link->conf);
if (i != CS_SUCCESS) {
- cs_error(link->handle, RequestConfiguration, i);
+ cs_error(link, RequestConfiguration, i);
rc = -1;
goto free_cfg_mem;
}
/* The Oxford Semiconductor OXCF950 cards are in fact single-port:
- 8 registers are for the UART, the others are extra registers */
- if (info->manfid == MANFID_OXSEMI) {
+ * 8 registers are for the UART, the others are extra registers.
+ * Siemens' MC45 PCMCIA (Possio's GCC) is OXCF950 based too.
+ */
+ if (info->manfid == MANFID_OXSEMI || (info->manfid == MANFID_POSSIO &&
+ info->prodid == PRODID_POSSIO_GCC)) {
+ int err;
+
if (cf->index == 1 || cf->index == 3) {
- setup_serial(handle, info, base2, link->irq.AssignedIRQ);
- outb(12, link->io.BasePort1 + 1);
+ err = setup_serial(link, info, base2,
+ link->irq.AssignedIRQ);
+ base2 = link->io.BasePort1;
} else {
- setup_serial(handle, info, link->io.BasePort1, link->irq.AssignedIRQ);
- outb(12, base2 + 1);
+ err = setup_serial(link, info, link->io.BasePort1,
+ link->irq.AssignedIRQ);
}
+ info->c950ctrl = base2;
+ wakeup_card(info);
rc = 0;
goto free_cfg_mem;
}
- setup_serial(handle, info, link->io.BasePort1, link->irq.AssignedIRQ);
+ setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ);
/* The Nokia cards are not really multiport cards */
if (info->manfid == MANFID_NOKIA) {
rc = 0;
goto free_cfg_mem;
}
for (i = 0; i < info->multi - 1; i++)
- setup_serial(handle, info, base2 + (8 * i),
+ setup_serial(link, info, base2 + (8 * i),
link->irq.AssignedIRQ);
rc = 0;
free_cfg_mem:
@@ -581,9 +578,8 @@ free_cfg_mem:
======================================================================*/
-void serial_config(dev_link_t * link)
+static int serial_config(struct pcmcia_device * link)
{
- client_handle_t handle = link->handle;
struct serial_info *info = link->priv;
struct serial_cfg_mem *cfg_mem;
tuple_t *tuple;
@@ -609,7 +605,7 @@ void serial_config(dev_link_t * link)
tuple->Attributes = 0;
/* Get configuration register information */
tuple->DesiredTuple = CISTPL_CONFIG;
- last_ret = first_tuple(handle, tuple, parse);
+ last_ret = first_tuple(link, tuple, parse);
if (last_ret != CS_SUCCESS) {
last_fn = ParseTuple;
goto cs_failed;
@@ -617,18 +613,16 @@ void serial_config(dev_link_t * link)
link->conf.ConfigBase = parse->config.base;
link->conf.Present = parse->config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
-
/* Is this a compliant multifunction card? */
tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
tuple->Attributes = TUPLE_RETURN_COMMON | TUPLE_RETURN_LINK;
- info->multi = (first_tuple(handle, tuple, parse) == CS_SUCCESS);
+ info->multi = (first_tuple(link, tuple, parse) == CS_SUCCESS);
/* Is this a multiport card? */
tuple->DesiredTuple = CISTPL_MANFID;
- if (first_tuple(handle, tuple, parse) == CS_SUCCESS) {
+ if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
info->manfid = parse->manfid.manf;
+ info->prodid = le16_to_cpu(buf[1]);
for (i = 0; i < MULTI_COUNT; i++)
if ((info->manfid == multi_id[i].manfid) &&
(parse->manfid.card == multi_id[i].prodid))
@@ -641,11 +635,11 @@ void serial_config(dev_link_t * link)
multifunction cards that ask for appropriate IO port ranges */
tuple->DesiredTuple = CISTPL_FUNCID;
if ((info->multi == 0) &&
- ((first_tuple(handle, tuple, parse) != CS_SUCCESS) ||
+ ((first_tuple(link, tuple, parse) != CS_SUCCESS) ||
(parse->funcid.func == CISTPL_FUNCID_MULTI) ||
(parse->funcid.func == CISTPL_FUNCID_SERIAL))) {
tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
- if (first_tuple(handle, tuple, parse) == CS_SUCCESS) {
+ if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0))
info->multi = cf->io.win[0].len >> 3;
if ((cf->io.nwin == 2) && (cf->io.win[0].len == 8) &&
@@ -664,31 +658,30 @@ void serial_config(dev_link_t * link)
if (info->manfid == MANFID_IBM) {
conf_reg_t reg = { 0, CS_READ, 0x800, 0 };
- last_ret = pcmcia_access_configuration_register(link->handle, &reg);
+ last_ret = pcmcia_access_configuration_register(link, &reg);
if (last_ret) {
last_fn = AccessConfigurationRegister;
goto cs_failed;
}
reg.Action = CS_WRITE;
reg.Value = reg.Value | 1;
- last_ret = pcmcia_access_configuration_register(link->handle, &reg);
+ last_ret = pcmcia_access_configuration_register(link, &reg);
if (last_ret) {
last_fn = AccessConfigurationRegister;
goto cs_failed;
}
}
- link->dev = &info->node[0];
- link->state &= ~DEV_CONFIG_PENDING;
+ link->dev_node = &info->node[0];
kfree(cfg_mem);
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
serial_remove(link);
- link->state &= ~DEV_CONFIG_PENDING;
kfree(cfg_mem);
+ return -ENODEV;
}
static struct pcmcia_device_id serial_ids[] = {
@@ -739,6 +732,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_MFC_DEVICE_PROD_ID1(1, "Motorola MARQUIS", 0xf03e4e77),
PCMCIA_MFC_DEVICE_PROD_ID2(1, "FAX/Modem/Ethernet Combo Card ", 0x1ed59302),
PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0301),
+ PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x0276),
PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0039),
PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0006),
PCMCIA_DEVICE_MANF_CARD(0x0105, 0x410a),
@@ -757,6 +751,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_DEVICE_PROD_ID14("MEGAHERTZ", "PCMCIA MODEM", 0xf510db04, 0xbd6c43ef),
PCMCIA_DEVICE_PROD_ID124("TOSHIBA", "T144PF", "PCMCIA MODEM", 0xb4585a1a, 0x7271409c, 0xbd6c43ef),
PCMCIA_DEVICE_PROD_ID123("FUJITSU", "FC14F ", "MBH10213", 0x6ee5a3d8, 0x30ead12b, 0xb00f05a0),
+ PCMCIA_DEVICE_PROD_ID123("Novatel Wireless", "Merlin UMTS Modem", "U630", 0x32607776, 0xd9e73b13, 0xe87332e),
PCMCIA_DEVICE_PROD_ID13("MEGAHERTZ", "V.34 PCMCIA MODEM", 0xf510db04, 0xbb2cce4a),
PCMCIA_DEVICE_PROD_ID12("Brain Boxes", "Bluetooth PC Card", 0xee138382, 0xd4ce9b02),
PCMCIA_DEVICE_PROD_ID12("CIRRUS LOGIC", "FAX MODEM", 0xe625f451, 0xcecd6dfa),
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index d3a7b0c3d38bf..dda0ca45d904f 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -35,73 +35,52 @@ typedef struct ixj_info_t {
} ixj_info_t;
static void ixj_detach(struct pcmcia_device *p_dev);
-static void ixj_config(dev_link_t * link);
-static void ixj_cs_release(dev_link_t * link);
+static int ixj_config(struct pcmcia_device * link);
+static void ixj_cs_release(struct pcmcia_device * link);
-static int ixj_attach(struct pcmcia_device *p_dev)
+static int ixj_probe(struct pcmcia_device *p_dev)
{
- dev_link_t *link;
-
DEBUG(0, "ixj_attach()\n");
/* Create new ixj device */
- link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
- if (!link)
- return -ENOMEM;
- memset(link, 0, sizeof(struct dev_link_t));
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->io.IOAddrLines = 3;
- link->conf.Vcc = 50;
- link->conf.IntType = INT_MEMORY_AND_IO;
- link->priv = kmalloc(sizeof(struct ixj_info_t), GFP_KERNEL);
- if (!link->priv) {
- kfree(link);
+ p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ p_dev->io.IOAddrLines = 3;
+ p_dev->conf.IntType = INT_MEMORY_AND_IO;
+ p_dev->priv = kmalloc(sizeof(struct ixj_info_t), GFP_KERNEL);
+ if (!p_dev->priv) {
return -ENOMEM;
}
- memset(link->priv, 0, sizeof(struct ixj_info_t));
-
- link->handle = p_dev;
- p_dev->instance = link;
+ memset(p_dev->priv, 0, sizeof(struct ixj_info_t));
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- ixj_config(link);
-
- return 0;
+ return ixj_config(p_dev);
}
-static void ixj_detach(struct pcmcia_device *p_dev)
+static void ixj_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
DEBUG(0, "ixj_detach(0x%p)\n", link);
- link->state &= ~DEV_RELEASE_PENDING;
- if (link->state & DEV_CONFIG)
- ixj_cs_release(link);
+ ixj_cs_release(link);
kfree(link->priv);
- kfree(link);
}
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void ixj_get_serial(dev_link_t * link, IXJ * j)
+static void ixj_get_serial(struct pcmcia_device * link, IXJ * j)
{
- client_handle_t handle;
tuple_t tuple;
u_short buf[128];
char *str;
int last_ret, last_fn, i, place;
- handle = link->handle;
DEBUG(0, "ixj_get_serial(0x%p)\n", link);
tuple.TupleData = (cisdata_t *) buf;
tuple.TupleOffset = 0;
tuple.TupleDataMax = 80;
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_VERS_1;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
str = (char *) buf;
printk("PCMCIA Version %d.%d\n", str[0], str[1]);
str += 2;
@@ -149,22 +128,19 @@ static void ixj_get_serial(dev_link_t * link, IXJ * j)
return;
}
-static void ixj_config(dev_link_t * link)
+static int ixj_config(struct pcmcia_device * link)
{
IXJ *j;
- client_handle_t handle;
ixj_info_t *info;
tuple_t tuple;
u_short buf[128];
cisparse_t parse;
- config_info_t conf;
cistpl_cftable_entry_t *cfg = &parse.cftable_entry;
cistpl_cftable_entry_t dflt =
{
0
};
int last_ret, last_fn;
- handle = link->handle;
info = link->priv;
DEBUG(0, "ixj_config(0x%p)\n", link);
tuple.TupleData = (cisdata_t *) buf;
@@ -172,19 +148,17 @@ static void ixj_config(dev_link_t * link)
tuple.TupleDataMax = 255;
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- link->state |= DEV_CONFIG;
- CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
tuple.Attributes = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
- if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
- pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+ if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
+ pcmcia_parse_tuple(link, &tuple, &parse) != 0)
goto next_entry;
if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
@@ -195,7 +169,7 @@ static void ixj_config(dev_link_t * link)
link->io.BasePort2 = io->win[1].base;
link->io.NumPorts2 = io->win[1].len;
}
- if (pcmcia_request_io(link->handle, &link->io) != 0)
+ if (pcmcia_request_io(link, &link->io) != 0)
goto next_entry;
/* If we've got this far, we're done */
break;
@@ -203,10 +177,10 @@ static void ixj_config(dev_link_t * link)
next_entry:
if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
dflt = *cfg;
- CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+ CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
}
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
/*
* Register the card with the core.
@@ -215,46 +189,21 @@ static void ixj_config(dev_link_t * link)
info->ndev = 1;
info->node.major = PHONE_MAJOR;
- link->dev = &info->node;
+ link->dev_node = &info->node;
ixj_get_serial(link, j);
- link->state &= ~DEV_CONFIG_PENDING;
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
ixj_cs_release(link);
+ return -ENODEV;
}
-static void ixj_cs_release(dev_link_t *link)
+static void ixj_cs_release(struct pcmcia_device *link)
{
ixj_info_t *info = link->priv;
DEBUG(0, "ixj_cs_release(0x%p)\n", link);
info->ndev = 0;
- link->dev = NULL;
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- link->state &= ~DEV_CONFIG;
-}
-
-static int ixj_suspend(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
-}
-
-static int ixj_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state &= ~DEV_SUSPEND;
- if (DEV_OK(link))
- pcmcia_request_configuration(link->handle, &link->conf);
-
- return 0;
+ pcmcia_disable_device(link);
}
static struct pcmcia_device_id ixj_ids[] = {
@@ -268,11 +217,9 @@ static struct pcmcia_driver ixj_driver = {
.drv = {
.name = "ixj_cs",
},
- .probe = ixj_attach,
+ .probe = ixj_probe,
.remove = ixj_detach,
.id_table = ixj_ids,
- .suspend = ixj_suspend,
- .resume = ixj_resume,
};
static int __init ixj_pcmcia_init(void)
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 134d2000128a5..302aa1ec312f5 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -67,11 +67,11 @@ module_param(pc_debug, int, 0644);
static const char driver_name[DEV_NAME_LEN] = "sl811_cs";
typedef struct local_info_t {
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
} local_info_t;
-static void sl811_cs_release(dev_link_t * link);
+static void sl811_cs_release(struct pcmcia_device * link);
/*====================================================================*/
@@ -138,41 +138,27 @@ static int sl811_hc_init(struct device *parent, ioaddr_t base_addr, int irq)
/*====================================================================*/
-static void sl811_cs_detach(struct pcmcia_device *p_dev)
+static void sl811_cs_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
-
DBG(0, "sl811_cs_detach(0x%p)\n", link);
- link->state &= ~DEV_PRESENT;
- if (link->state & DEV_CONFIG)
- sl811_cs_release(link);
+ sl811_cs_release(link);
/* This points to the parent local_info_t struct */
kfree(link->priv);
}
-static void sl811_cs_release(dev_link_t * link)
+static void sl811_cs_release(struct pcmcia_device * link)
{
-
DBG(0, "sl811_cs_release(0x%p)\n", link);
- /* Unlink the device chain */
- link->dev = NULL;
-
+ pcmcia_disable_device(link);
platform_device_unregister(&platform_dev);
- pcmcia_release_configuration(link->handle);
- if (link->io.NumPorts1)
- pcmcia_release_io(link->handle, &link->io);
- if (link->irq.AssignedIRQ)
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
}
-static void sl811_cs_config(dev_link_t *link)
+static int sl811_cs_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
- struct device *parent = &handle_to_dev(handle);
+ struct device *parent = &handle_to_dev(link);
local_info_t *dev = link->priv;
tuple_t tuple;
cisparse_t parse;
@@ -188,27 +174,23 @@ static void sl811_cs_config(dev_link_t *link)
tuple.TupleData = buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
- /* Configure card */
- link->state |= DEV_CONFIG;
-
/* Look up the current Vcc */
CS_CHECK(GetConfigurationInfo,
- pcmcia_get_configuration_info(handle, &conf));
- link->conf.Vcc = conf.Vcc;
+ pcmcia_get_configuration_info(link, &conf));
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (1) {
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
- if (pcmcia_get_tuple_data(handle, &tuple) != 0
- || pcmcia_parse_tuple(handle, &tuple, &parse)
+ if (pcmcia_get_tuple_data(link, &tuple) != 0
+ || pcmcia_parse_tuple(link, &tuple, &parse)
!= 0)
goto next_entry;
@@ -234,10 +216,10 @@ static void sl811_cs_config(dev_link_t *link)
}
if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
- link->conf.Vpp1 = link->conf.Vpp2 =
+ link->conf.Vpp =
dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
/* we need an interrupt */
@@ -254,15 +236,14 @@ static void sl811_cs_config(dev_link_t *link)
link->io.BasePort1 = io->win[0].base;
link->io.NumPorts1 = io->win[0].len;
- if (pcmcia_request_io(link->handle, &link->io) != 0)
+ if (pcmcia_request_io(link, &link->io) != 0)
goto next_entry;
}
break;
next_entry:
- if (link->io.NumPorts1)
- pcmcia_release_io(link->handle, &link->io);
- last_ret = pcmcia_get_next_tuple(handle, &tuple);
+ pcmcia_disable_device(link);
+ last_ret = pcmcia_get_next_tuple(link, &tuple);
}
/* require an IRQ and two registers */
@@ -270,71 +251,46 @@ next_entry:
goto cs_failed;
if (link->conf.Attributes & CONF_ENABLE_IRQ)
CS_CHECK(RequestIRQ,
- pcmcia_request_irq(link->handle, &link->irq));
+ pcmcia_request_irq(link, &link->irq));
else
goto cs_failed;
CS_CHECK(RequestConfiguration,
- pcmcia_request_configuration(link->handle, &link->conf));
+ pcmcia_request_configuration(link, &link->conf));
sprintf(dev->node.dev_name, driver_name);
dev->node.major = dev->node.minor = 0;
- link->dev = &dev->node;
+ link->dev_node = &dev->node;
- printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d",
- dev->node.dev_name, link->conf.ConfigIndex,
- link->conf.Vcc/10, link->conf.Vcc%10);
- if (link->conf.Vpp1)
- printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
+ printk(KERN_INFO "%s: index 0x%02x: ",
+ dev->node.dev_name, link->conf.ConfigIndex);
+ if (link->conf.Vpp)
+ printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
printk(", irq %d", link->irq.AssignedIRQ);
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
printk("\n");
- link->state &= ~DEV_CONFIG_PENDING;
-
if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ)
< 0) {
cs_failed:
printk("sl811_cs_config failed\n");
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
sl811_cs_release(link);
- link->state &= ~DEV_CONFIG_PENDING;
+ return -ENODEV;
}
-}
-
-static int sl811_suspend(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state |= DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
- return 0;
-}
-
-static int sl811_resume(struct pcmcia_device *dev)
-{
- dev_link_t *link = dev_to_instance(dev);
-
- link->state &= ~DEV_SUSPEND;
- if (link->state & DEV_CONFIG)
- pcmcia_request_configuration(link->handle, &link->conf);
-
return 0;
}
-static int sl811_cs_attach(struct pcmcia_device *p_dev)
+static int sl811_cs_probe(struct pcmcia_device *link)
{
local_info_t *local;
- dev_link_t *link;
local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
if (!local)
return -ENOMEM;
memset(local, 0, sizeof(local_info_t));
- link = &local->link;
+ local->p_dev = link;
link->priv = local;
/* Initialize */
@@ -343,16 +299,9 @@ static int sl811_cs_attach(struct pcmcia_device *p_dev)
link->irq.Handler = NULL;
link->conf.Attributes = 0;
- link->conf.Vcc = 33;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->handle = p_dev;
- p_dev->instance = link;
-
- link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- sl811_cs_config(link);
-
- return 0;
+ return sl811_cs_config(link);
}
static struct pcmcia_device_id sl811_ids[] = {
@@ -366,11 +315,9 @@ static struct pcmcia_driver sl811_cs_driver = {
.drv = {
.name = (char *)driver_name,
},
- .probe = sl811_cs_attach,
+ .probe = sl811_cs_probe,
.remove = sl811_cs_detach,
.id_table = sl811_ids,
- .suspend = sl811_suspend,
- .resume = sl811_resume,
};
/*====================================================================*/
diff --git a/drivers/usb/input/hid-input.c b/drivers/usb/input/hid-input.c
index cb0d80f492520..25bc85f8ce39b 100644
--- a/drivers/usb/input/hid-input.c
+++ b/drivers/usb/input/hid-input.c
@@ -510,7 +510,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x025: map_key_clear(KEY_TV); break;
case 0x026: map_key_clear(KEY_MENU); break;
case 0x031: map_key_clear(KEY_AUDIO); break;
- case 0x032: map_key_clear(KEY_SUBTITLE); break;
+ case 0x032: map_key_clear(KEY_TEXT); break;
case 0x033: map_key_clear(KEY_LAST); break;
case 0x047: map_key_clear(KEY_MP3); break;
case 0x048: map_key_clear(KEY_DVD); break;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 22e9d696fdd21..f87c0171f4eca 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -904,18 +904,6 @@ config FB_MATROX_MULTIHEAD
There is no need for enabling 'Matrox multihead support' if you have
only one Matrox card in the box.
-config FB_RADEON_OLD
- tristate "ATI Radeon display support (Old driver)"
- depends on FB && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select FB_MACMODES if PPC
- help
- Choose this option if you want to use an ATI Radeon graphics card as
- a framebuffer device. There are both PCI and AGP versions. You
- don't need to choose this to run the Radeon in plain VGA mode.
-
config FB_RADEON
tristate "ATI Radeon display support"
depends on FB && PCI
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index cb90218515ac4..23de3b2c78564 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -39,7 +39,6 @@ obj-$(CONFIG_FB_KYRO) += kyro/
obj-$(CONFIG_FB_SAVAGE) += savage/
obj-$(CONFIG_FB_GEODE) += geode/
obj-$(CONFIG_FB_I810) += vgastate.o
-obj-$(CONFIG_FB_RADEON_OLD) += radeonfb.o
obj-$(CONFIG_FB_NEOMAGIC) += neofb.o vgastate.o
obj-$(CONFIG_FB_VIRGE) += virgefb.o
obj-$(CONFIG_FB_3DFX) += tdfxfb.o
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 9d996f2c10d57..b895eaaa73fd8 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -43,11 +43,11 @@ config LCD_DEVICE
default y
config BACKLIGHT_CORGI
- tristate "Sharp Corgi Backlight Driver (SL-C7xx Series)"
+ tristate "Sharp Corgi Backlight Driver (SL Series)"
depends on BACKLIGHT_DEVICE && PXA_SHARPSL
default y
help
- If you have a Sharp Zaurus SL-C7xx, say y to enable the
+ If you have a Sharp Zaurus SL-C7xx, SL-Cxx00 or SL-6000x, say y to enable the
backlight driver.
config BACKLIGHT_HP680
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 151fda8dded00..334b1db1bd7cc 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -16,14 +16,12 @@
static ssize_t backlight_show_power(struct class_device *cdev, char *buf)
{
- int rc;
+ int rc = -ENXIO;
struct backlight_device *bd = to_backlight_device(cdev);
down(&bd->sem);
- if (likely(bd->props && bd->props->get_power))
- rc = sprintf(buf, "%d\n", bd->props->get_power(bd));
- else
- rc = -ENXIO;
+ if (likely(bd->props))
+ rc = sprintf(buf, "%d\n", bd->props->power);
up(&bd->sem);
return rc;
@@ -31,7 +29,7 @@ static ssize_t backlight_show_power(struct class_device *cdev, char *buf)
static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count)
{
- int rc, power;
+ int rc = -ENXIO, power;
char *endp;
struct backlight_device *bd = to_backlight_device(cdev);
@@ -40,12 +38,13 @@ static ssize_t backlight_store_power(struct class_device *cdev, const char *buf,
return -EINVAL;
down(&bd->sem);
- if (likely(bd->props && bd->props->set_power)) {
+ if (likely(bd->props)) {
pr_debug("backlight: set power to %d\n", power);
- bd->props->set_power(bd, power);
+ bd->props->power = power;
+ if (likely(bd->props->update_status))
+ bd->props->update_status(bd);
rc = count;
- } else
- rc = -ENXIO;
+ }
up(&bd->sem);
return rc;
@@ -53,14 +52,12 @@ static ssize_t backlight_store_power(struct class_device *cdev, const char *buf,
static ssize_t backlight_show_brightness(struct class_device *cdev, char *buf)
{
- int rc;
+ int rc = -ENXIO;
struct backlight_device *bd = to_backlight_device(cdev);
down(&bd->sem);
- if (likely(bd->props && bd->props->get_brightness))
- rc = sprintf(buf, "%d\n", bd->props->get_brightness(bd));
- else
- rc = -ENXIO;
+ if (likely(bd->props))
+ rc = sprintf(buf, "%d\n", bd->props->brightness);
up(&bd->sem);
return rc;
@@ -68,7 +65,7 @@ static ssize_t backlight_show_brightness(struct class_device *cdev, char *buf)
static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count)
{
- int rc, brightness;
+ int rc = -ENXIO, brightness;
char *endp;
struct backlight_device *bd = to_backlight_device(cdev);
@@ -77,12 +74,18 @@ static ssize_t backlight_store_brightness(struct class_device *cdev, const char
return -EINVAL;
down(&bd->sem);
- if (likely(bd->props && bd->props->set_brightness)) {
- pr_debug("backlight: set brightness to %d\n", brightness);
- bd->props->set_brightness(bd, brightness);
- rc = count;
- } else
- rc = -ENXIO;
+ if (likely(bd->props)) {
+ if (brightness > bd->props->max_brightness)
+ rc = -EINVAL;
+ else {
+ pr_debug("backlight: set brightness to %d\n",
+ brightness);
+ bd->props->brightness = brightness;
+ if (likely(bd->props->update_status))
+ bd->props->update_status(bd);
+ rc = count;
+ }
+ }
up(&bd->sem);
return rc;
@@ -90,14 +93,26 @@ static ssize_t backlight_store_brightness(struct class_device *cdev, const char
static ssize_t backlight_show_max_brightness(struct class_device *cdev, char *buf)
{
- int rc;
+ int rc = -ENXIO;
struct backlight_device *bd = to_backlight_device(cdev);
down(&bd->sem);
if (likely(bd->props))
rc = sprintf(buf, "%d\n", bd->props->max_brightness);
- else
- rc = -ENXIO;
+ up(&bd->sem);
+
+ return rc;
+}
+
+static ssize_t backlight_show_actual_brightness(struct class_device *cdev,
+ char *buf)
+{
+ int rc = -ENXIO;
+ struct backlight_device *bd = to_backlight_device(cdev);
+
+ down(&bd->sem);
+ if (likely(bd->props && bd->props->get_brightness))
+ rc = sprintf(buf, "%d\n", bd->props->get_brightness(bd));
up(&bd->sem);
return rc;
@@ -123,7 +138,10 @@ static struct class backlight_class = {
static struct class_device_attribute bl_class_device_attributes[] = {
DECLARE_ATTR(power, 0644, backlight_show_power, backlight_store_power),
- DECLARE_ATTR(brightness, 0644, backlight_show_brightness, backlight_store_brightness),
+ DECLARE_ATTR(brightness, 0644, backlight_show_brightness,
+ backlight_store_brightness),
+ DECLARE_ATTR(actual_brightness, 0444, backlight_show_actual_brightness,
+ NULL),
DECLARE_ATTR(max_brightness, 0444, backlight_show_max_brightness, NULL),
};
@@ -144,8 +162,12 @@ static int fb_notifier_callback(struct notifier_block *self,
bd = container_of(self, struct backlight_device, fb_notif);
down(&bd->sem);
if (bd->props)
- if (!bd->props->check_fb || bd->props->check_fb(evdata->info))
- bd->props->set_power(bd, *(int *)evdata->data);
+ if (!bd->props->check_fb ||
+ bd->props->check_fb(evdata->info)) {
+ bd->props->fb_blank = *(int *)evdata->data;
+ if (likely(bd->props && bd->props->update_status))
+ bd->props->update_status(bd);
+ }
up(&bd->sem);
return 0;
}
@@ -231,6 +253,12 @@ void backlight_device_unregister(struct backlight_device *bd)
&bl_class_device_attributes[i]);
down(&bd->sem);
+ if (likely(bd->props && bd->props->update_status)) {
+ bd->props->brightness = 0;
+ bd->props->power = 0;
+ bd->props->update_status(bd);
+ }
+
bd->props = NULL;
up(&bd->sem);
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
index d0aaf450e8c72..2ebbfd95145fd 100644
--- a/drivers/video/backlight/corgi_bl.c
+++ b/drivers/video/backlight/corgi_bl.c
@@ -1,7 +1,7 @@
/*
- * Backlight Driver for Sharp Corgi
+ * Backlight Driver for Sharp Zaurus Handhelds (various models)
*
- * Copyright (c) 2004-2005 Richard Purdie
+ * Copyright (c) 2004-2006 Richard Purdie
*
* Based on Sharp's 2.4 Backlight Driver
*
@@ -15,80 +15,63 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/fb.h>
#include <linux/backlight.h>
-
#include <asm/arch/sharpsl.h>
#include <asm/hardware/sharpsl_pm.h>
-#define CORGI_DEFAULT_INTENSITY 0x1f
-#define CORGI_LIMIT_MASK 0x0b
-
-static int corgibl_powermode = FB_BLANK_UNBLANK;
-static int current_intensity = 0;
-static int corgibl_limit = 0;
-static void (*corgibl_mach_set_intensity)(int intensity);
-static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED;
+static int corgibl_intensity;
+static DEFINE_MUTEX(bl_mutex);
static struct backlight_properties corgibl_data;
+static struct backlight_device *corgi_backlight_device;
+static struct corgibl_machinfo *bl_machinfo;
-static void corgibl_send_intensity(int intensity)
+static unsigned long corgibl_flags;
+#define CORGIBL_SUSPENDED 0x01
+#define CORGIBL_BATTLOW 0x02
+
+static int corgibl_send_intensity(struct backlight_device *bd)
{
- unsigned long flags;
void (*corgi_kick_batt)(void);
+ int intensity = bd->props->brightness;
- if (corgibl_powermode != FB_BLANK_UNBLANK) {
+ if (bd->props->power != FB_BLANK_UNBLANK)
intensity = 0;
- } else {
- if (corgibl_limit)
- intensity &= CORGI_LIMIT_MASK;
- }
-
- spin_lock_irqsave(&bl_lock, flags);
+ if (bd->props->fb_blank != FB_BLANK_UNBLANK)
+ intensity = 0;
+ if (corgibl_flags & CORGIBL_SUSPENDED)
+ intensity = 0;
+ if (corgibl_flags & CORGIBL_BATTLOW)
+ intensity &= bl_machinfo->limit_mask;
- corgibl_mach_set_intensity(intensity);
+ mutex_lock(&bl_mutex);
+ bl_machinfo->set_bl_intensity(intensity);
+ mutex_unlock(&bl_mutex);
- spin_unlock_irqrestore(&bl_lock, flags);
+ corgibl_intensity = intensity;
corgi_kick_batt = symbol_get(sharpsl_battery_kick);
if (corgi_kick_batt) {
corgi_kick_batt();
symbol_put(sharpsl_battery_kick);
}
-}
-static void corgibl_blank(int blank)
-{
- switch(blank) {
-
- case FB_BLANK_NORMAL:
- case FB_BLANK_VSYNC_SUSPEND:
- case FB_BLANK_HSYNC_SUSPEND:
- case FB_BLANK_POWERDOWN:
- if (corgibl_powermode == FB_BLANK_UNBLANK) {
- corgibl_send_intensity(0);
- corgibl_powermode = blank;
- }
- break;
- case FB_BLANK_UNBLANK:
- if (corgibl_powermode != FB_BLANK_UNBLANK) {
- corgibl_powermode = blank;
- corgibl_send_intensity(current_intensity);
- }
- break;
- }
+ return 0;
}
#ifdef CONFIG_PM
static int corgibl_suspend(struct platform_device *dev, pm_message_t state)
{
- corgibl_blank(FB_BLANK_POWERDOWN);
+ corgibl_flags |= CORGIBL_SUSPENDED;
+ corgibl_send_intensity(corgi_backlight_device);
return 0;
}
static int corgibl_resume(struct platform_device *dev)
{
- corgibl_blank(FB_BLANK_UNBLANK);
+ corgibl_flags &= ~CORGIBL_SUSPENDED;
+ corgibl_send_intensity(corgi_backlight_device);
return 0;
}
#else
@@ -96,68 +79,55 @@ static int corgibl_resume(struct platform_device *dev)
#define corgibl_resume NULL
#endif
-
-static int corgibl_set_power(struct backlight_device *bd, int state)
-{
- corgibl_blank(state);
- return 0;
-}
-
-static int corgibl_get_power(struct backlight_device *bd)
+static int corgibl_get_intensity(struct backlight_device *bd)
{
- return corgibl_powermode;
+ return corgibl_intensity;
}
-static int corgibl_set_intensity(struct backlight_device *bd, int intensity)
+static int corgibl_set_intensity(struct backlight_device *bd)
{
- if (intensity > corgibl_data.max_brightness)
- intensity = corgibl_data.max_brightness;
- corgibl_send_intensity(intensity);
- current_intensity=intensity;
+ corgibl_send_intensity(corgi_backlight_device);
return 0;
}
-static int corgibl_get_intensity(struct backlight_device *bd)
-{
- return current_intensity;
-}
-
/*
* Called when the battery is low to limit the backlight intensity.
* If limit==0 clear any limit, otherwise limit the intensity
*/
void corgibl_limit_intensity(int limit)
{
- corgibl_limit = (limit ? 1 : 0);
- corgibl_send_intensity(current_intensity);
+ if (limit)
+ corgibl_flags |= CORGIBL_BATTLOW;
+ else
+ corgibl_flags &= ~CORGIBL_BATTLOW;
+ corgibl_send_intensity(corgi_backlight_device);
}
EXPORT_SYMBOL(corgibl_limit_intensity);
static struct backlight_properties corgibl_data = {
- .owner = THIS_MODULE,
- .get_power = corgibl_get_power,
- .set_power = corgibl_set_power,
+ .owner = THIS_MODULE,
.get_brightness = corgibl_get_intensity,
- .set_brightness = corgibl_set_intensity,
+ .update_status = corgibl_set_intensity,
};
-static struct backlight_device *corgi_backlight_device;
-
static int __init corgibl_probe(struct platform_device *pdev)
{
struct corgibl_machinfo *machinfo = pdev->dev.platform_data;
+ bl_machinfo = machinfo;
corgibl_data.max_brightness = machinfo->max_intensity;
- corgibl_mach_set_intensity = machinfo->set_bl_intensity;
+ if (!machinfo->limit_mask)
+ machinfo->limit_mask = -1;
corgi_backlight_device = backlight_device_register ("corgi-bl",
NULL, &corgibl_data);
if (IS_ERR (corgi_backlight_device))
return PTR_ERR (corgi_backlight_device);
- corgibl_set_intensity(NULL, CORGI_DEFAULT_INTENSITY);
- corgibl_limit_intensity(0);
+ corgibl_data.power = FB_BLANK_UNBLANK;
+ corgibl_data.brightness = machinfo->default_intensity;
+ corgibl_send_intensity(corgi_backlight_device);
printk("Corgi Backlight Driver Initialized.\n");
return 0;
@@ -167,8 +137,6 @@ static int corgibl_remove(struct platform_device *dev)
{
backlight_device_unregister(corgi_backlight_device);
- corgibl_set_intensity(NULL, 0);
-
printk("Corgi Backlight Driver Unloaded\n");
return 0;
}
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 95da4c9ed1f14..a71e984c93d47 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -13,7 +13,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/fb.h>
#include <linux/backlight.h>
@@ -25,66 +25,58 @@
#define HP680_MAX_INTENSITY 255
#define HP680_DEFAULT_INTENSITY 10
-static int hp680bl_powermode = FB_BLANK_UNBLANK;
+static int hp680bl_suspended;
static int current_intensity = 0;
static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED;
+static struct backlight_device *hp680_backlight_device;
-static void hp680bl_send_intensity(int intensity)
+static void hp680bl_send_intensity(struct backlight_device *bd)
{
unsigned long flags;
+ u16 v;
+ int intensity = bd->props->brightness;
- if (hp680bl_powermode != FB_BLANK_UNBLANK)
+ if (bd->props->power != FB_BLANK_UNBLANK)
+ intensity = 0;
+ if (bd->props->fb_blank != FB_BLANK_UNBLANK)
+ intensity = 0;
+ if (hp680bl_suspended)
intensity = 0;
spin_lock_irqsave(&bl_lock, flags);
- sh_dac_output(255-(u8)intensity, DAC_LCD_BRIGHTNESS);
+ if (intensity && current_intensity == 0) {
+ sh_dac_enable(DAC_LCD_BRIGHTNESS);
+ v = inw(HD64461_GPBDR);
+ v &= ~HD64461_GPBDR_LCDOFF;
+ outw(v, HD64461_GPBDR);
+ sh_dac_output(255-(u8)intensity, DAC_LCD_BRIGHTNESS);
+ } else if (intensity == 0 && current_intensity != 0) {
+ sh_dac_output(255-(u8)intensity, DAC_LCD_BRIGHTNESS);
+ sh_dac_disable(DAC_LCD_BRIGHTNESS);
+ v = inw(HD64461_GPBDR);
+ v |= HD64461_GPBDR_LCDOFF;
+ outw(v, HD64461_GPBDR);
+ } else if (intensity) {
+ sh_dac_output(255-(u8)intensity, DAC_LCD_BRIGHTNESS);
+ }
spin_unlock_irqrestore(&bl_lock, flags);
-}
-static void hp680bl_blank(int blank)
-{
- u16 v;
-
- switch(blank) {
-
- case FB_BLANK_NORMAL:
- case FB_BLANK_VSYNC_SUSPEND:
- case FB_BLANK_HSYNC_SUSPEND:
- case FB_BLANK_POWERDOWN:
- if (hp680bl_powermode == FB_BLANK_UNBLANK) {
- hp680bl_send_intensity(0);
- hp680bl_powermode = blank;
- sh_dac_disable(DAC_LCD_BRIGHTNESS);
- v = inw(HD64461_GPBDR);
- v |= HD64461_GPBDR_LCDOFF;
- outw(v, HD64461_GPBDR);
- }
- break;
- case FB_BLANK_UNBLANK:
- if (hp680bl_powermode != FB_BLANK_UNBLANK) {
- sh_dac_enable(DAC_LCD_BRIGHTNESS);
- v = inw(HD64461_GPBDR);
- v &= ~HD64461_GPBDR_LCDOFF;
- outw(v, HD64461_GPBDR);
- hp680bl_powermode = blank;
- hp680bl_send_intensity(current_intensity);
- }
- break;
- }
+ current_intensity = intensity;
}
+
#ifdef CONFIG_PM
-static int hp680bl_suspend(struct device *dev, pm_message_t state, u32 level)
+static int hp680bl_suspend(struct platform_device *dev, pm_message_t state)
{
- if (level == SUSPEND_POWER_DOWN)
- hp680bl_blank(FB_BLANK_POWERDOWN);
+ hp680bl_suspended = 1;
+ hp680bl_send_intensity(hp680_backlight_device);
return 0;
}
-static int hp680bl_resume(struct device *dev, u32 level)
+static int hp680bl_resume(struct platform_device *dev)
{
- if (level == RESUME_POWER_ON)
- hp680bl_blank(FB_BLANK_UNBLANK);
+ hp680bl_suspended = 0;
+ hp680bl_send_intensity(hp680_backlight_device);
return 0;
}
#else
@@ -92,24 +84,9 @@ static int hp680bl_resume(struct device *dev, u32 level)
#define hp680bl_resume NULL
#endif
-
-static int hp680bl_set_power(struct backlight_device *bd, int state)
+static int hp680bl_set_intensity(struct backlight_device *bd)
{
- hp680bl_blank(state);
- return 0;
-}
-
-static int hp680bl_get_power(struct backlight_device *bd)
-{
- return hp680bl_powermode;
-}
-
-static int hp680bl_set_intensity(struct backlight_device *bd, int intensity)
-{
- if (intensity > HP680_MAX_INTENSITY)
- intensity = HP680_MAX_INTENSITY;
- hp680bl_send_intensity(intensity);
- current_intensity = intensity;
+ hp680bl_send_intensity(bd);
return 0;
}
@@ -120,65 +97,67 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
static struct backlight_properties hp680bl_data = {
.owner = THIS_MODULE,
- .get_power = hp680bl_get_power,
- .set_power = hp680bl_set_power,
.max_brightness = HP680_MAX_INTENSITY,
.get_brightness = hp680bl_get_intensity,
- .set_brightness = hp680bl_set_intensity,
+ .update_status = hp680bl_set_intensity,
};
-static struct backlight_device *hp680_backlight_device;
-
-static int __init hp680bl_probe(struct device *dev)
+static int __init hp680bl_probe(struct platform_device *dev)
{
hp680_backlight_device = backlight_device_register ("hp680-bl",
NULL, &hp680bl_data);
if (IS_ERR (hp680_backlight_device))
return PTR_ERR (hp680_backlight_device);
- hp680bl_set_intensity(NULL, HP680_DEFAULT_INTENSITY);
+ hp680_backlight_device->props->brightness = HP680_DEFAULT_INTENSITY;
+ hp680bl_send_intensity(hp680_backlight_device);
return 0;
}
-static int hp680bl_remove(struct device *dev)
+static int hp680bl_remove(struct platform_device *dev)
{
backlight_device_unregister(hp680_backlight_device);
return 0;
}
-static struct device_driver hp680bl_driver = {
- .name = "hp680-bl",
- .bus = &platform_bus_type,
+static struct platform_driver hp680bl_driver = {
.probe = hp680bl_probe,
.remove = hp680bl_remove,
.suspend = hp680bl_suspend,
.resume = hp680bl_resume,
+ .driver = {
+ .name = "hp680-bl",
+ },
};
-static struct platform_device hp680bl_device = {
- .name = "hp680-bl",
- .id = -1,
-};
+static struct platform_device *hp680bl_device;
static int __init hp680bl_init(void)
{
int ret;
- ret=driver_register(&hp680bl_driver);
+ ret = platform_driver_register(&hp680bl_driver);
if (!ret) {
- ret = platform_device_register(&hp680bl_device);
- if (ret)
- driver_unregister(&hp680bl_driver);
+ hp680bl_device = platform_device_alloc("hp680-bl", -1);
+ if (!hp680bl_device)
+ return -ENOMEM;
+
+ ret = platform_device_add(hp680bl_device);
+
+ if (ret) {
+ platform_device_put(hp680bl_device);
+ platform_driver_unregister(&hp680bl_driver);
+ }
}
return ret;
}
static void __exit hp680bl_exit(void)
{
- platform_device_unregister(&hp680bl_device);
- driver_unregister(&hp680bl_driver);
+ platform_device_unregister(hp680bl_device);
+ platform_driver_unregister(&hp680bl_driver);
}
module_init(hp680bl_init);
diff --git a/drivers/video/cfbimgblt.c b/drivers/video/cfbimgblt.c
index 910e2338a27e3..8ba6152db2fdb 100644
--- a/drivers/video/cfbimgblt.c
+++ b/drivers/video/cfbimgblt.c
@@ -169,7 +169,7 @@ static inline void slow_imageblit(const struct fb_image *image, struct fb_info *
while (j--) {
l--;
- color = (*s & 1 << (FB_BIT_NR(l))) ? fgcolor : bgcolor;
+ color = (*s & (1 << l)) ? fgcolor : bgcolor;
val |= FB_SHIFT_HIGH(color, shift);
/* Did the bitshift spill bits to the next long? */
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 041d069878612..ca020719d20b1 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -466,7 +466,7 @@ static int __init fb_console_setup(char *this_opt)
int i, j;
if (!this_opt || !*this_opt)
- return 0;
+ return 1;
while ((options = strsep(&this_opt, ",")) != NULL) {
if (!strncmp(options, "font:", 5))
@@ -481,10 +481,10 @@ static int __init fb_console_setup(char *this_opt)
options++;
}
if (*options != ',')
- return 0;
+ return 1;
options++;
} else
- return 0;
+ return 1;
}
if (!strncmp(options, "map:", 4)) {
@@ -496,7 +496,7 @@ static int __init fb_console_setup(char *this_opt)
con2fb_map_boot[i] =
(options[j++]-'0') % FB_MAX;
}
- return 0;
+ return 1;
}
if (!strncmp(options, "vc:", 3)) {
@@ -518,7 +518,7 @@ static int __init fb_console_setup(char *this_opt)
rotate = 0;
}
}
- return 0;
+ return 1;
}
__setup("fbcon=", fb_console_setup);
@@ -1142,6 +1142,7 @@ static void fbcon_init(struct vc_data *vc, int init)
set_blitting_type(vc, info);
}
+ ops->p = &fb_display[fg_console];
}
static void fbcon_deinit(struct vc_data *vc)
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index d6041e781aca9..74ac2acaf72c8 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -275,7 +275,7 @@ static int __init sti_setup(char *str)
if (str)
strlcpy (default_sti_path, str, sizeof (default_sti_path));
- return 0;
+ return 1;
}
/* Assuming the machine has multiple STI consoles (=graphic cards) which
@@ -321,7 +321,7 @@ static int __init sti_font_setup(char *str)
i++;
}
- return 0;
+ return 1;
}
/* The optional linux kernel parameter "sti_font" defines which font
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index b1a8dca764306..944855b3e4afe 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1588,7 +1588,7 @@ static int __init video_setup(char *options)
}
}
- return 0;
+ return 1;
}
__setup("video=", video_setup);
#endif
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 53ad61f1038c2..809fc5eefc15b 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -232,9 +232,9 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (var->yres < MIN_YRES)
var->yres = MIN_YRES;
if (var->xres > fbi->max_xres)
- var->xres = fbi->max_xres;
+ return -EINVAL;
if (var->yres > fbi->max_yres)
- var->yres = fbi->max_yres;
+ return -EINVAL;
var->xres_virtual =
max(var->xres_virtual, var->xres);
var->yres_virtual =
@@ -781,7 +781,7 @@ static void pxafb_disable_controller(struct pxafb_info *fbi)
LCCR0 &= ~LCCR0_LDM; /* Enable LCD Disable Done Interrupt */
LCCR0 |= LCCR0_DIS; /* Disable LCD Controller */
- schedule_timeout(20 * HZ / 1000);
+ schedule_timeout(200 * HZ / 1000);
remove_wait_queue(&fbi->ctrlr_wait, &wait);
/* disable LCD controller clock */
@@ -1274,7 +1274,7 @@ int __init pxafb_probe(struct platform_device *dev)
struct pxafb_mach_info *inf;
int ret;
- dev_dbg(dev, "pxafb_probe\n");
+ dev_dbg(&dev->dev, "pxafb_probe\n");
inf = dev->dev.platform_data;
ret = -ENOMEM;
diff --git a/drivers/video/radeonfb.c b/drivers/video/radeonfb.c
deleted file mode 100644
index afb6c2ead599d..0000000000000
--- a/drivers/video/radeonfb.c
+++ /dev/null
@@ -1,3167 +0,0 @@
-/*
- * drivers/video/radeonfb.c
- * framebuffer driver for ATI Radeon chipset video boards
- *
- * Copyright 2000 Ani Joshi <ajoshi@kernel.crashing.org>
- *
- *
- * ChangeLog:
- * 2000-08-03 initial version 0.0.1
- * 2000-09-10 more bug fixes, public release 0.0.5
- * 2001-02-19 mode bug fixes, 0.0.7
- * 2001-07-05 fixed scrolling issues, engine initialization,
- * and minor mode tweaking, 0.0.9
- * 2001-09-07 Radeon VE support, Nick Kurshev
- * blanking, pan_display, and cmap fixes, 0.1.0
- * 2001-10-10 Radeon 7500 and 8500 support, and experimental
- * flat panel support, 0.1.1
- * 2001-11-17 Radeon M6 (ppc) support, Daniel Berlin, 0.1.2
- * 2001-11-18 DFP fixes, Kevin Hendricks, 0.1.3
- * 2001-11-29 more cmap, backlight fixes, Benjamin Herrenschmidt
- * 2002-01-18 DFP panel detection via BIOS, Michael Clark, 0.1.4
- * 2002-06-02 console switching, mode set fixes, accel fixes
- * 2002-06-03 MTRR support, Peter Horton, 0.1.5
- * 2002-09-21 rv250, r300, m9 initial support,
- * added mirror option, 0.1.6
- *
- * Special thanks to ATI DevRel team for their hardware donations.
- *
- */
-
-
-#define RADEON_VERSION "0.1.6"
-
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/tty.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#if defined(__powerpc__)
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include "macmodes.h"
-
-#ifdef CONFIG_NVRAM
-#include <linux/nvram.h>
-#endif
-
-#ifdef CONFIG_PMAC_BACKLIGHT
-#include <asm/backlight.h>
-#endif
-
-#ifdef CONFIG_BOOTX_TEXT
-#include <asm/btext.h>
-#endif
-
-#ifdef CONFIG_ADB_PMU
-#include <linux/adb.h>
-#include <linux/pmu.h>
-#endif
-
-#endif /* __powerpc__ */
-
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
-
-#include <video/radeon.h>
-#include <linux/radeonfb.h>
-
-#define DEBUG 0
-
-#if DEBUG
-#define RTRACE printk
-#else
-#define RTRACE if(0) printk
-#endif
-
-// XXX
-#undef CONFIG_PMAC_PBOOK
-
-
-enum radeon_chips {
- RADEON_QD,
- RADEON_QE,
- RADEON_QF,
- RADEON_QG,
- RADEON_QY,
- RADEON_QZ,
- RADEON_LW,
- RADEON_LX,
- RADEON_LY,
- RADEON_LZ,
- RADEON_QL,
- RADEON_QN,
- RADEON_QO,
- RADEON_Ql,
- RADEON_BB,
- RADEON_QW,
- RADEON_QX,
- RADEON_Id,
- RADEON_Ie,
- RADEON_If,
- RADEON_Ig,
- RADEON_Ya,
- RADEON_Yd,
- RADEON_Ld,
- RADEON_Le,
- RADEON_Lf,
- RADEON_Lg,
- RADEON_ND,
- RADEON_NE,
- RADEON_NF,
- RADEON_NG,
- RADEON_QM
-};
-
-enum radeon_arch {
- RADEON_R100,
- RADEON_RV100,
- RADEON_R200,
- RADEON_RV200,
- RADEON_RV250,
- RADEON_R300,
- RADEON_M6,
- RADEON_M7,
- RADEON_M9
-};
-
-static struct radeon_chip_info {
- const char *name;
- unsigned char arch;
-} radeon_chip_info[] __devinitdata = {
- { "QD", RADEON_R100 },
- { "QE", RADEON_R100 },
- { "QF", RADEON_R100 },
- { "QG", RADEON_R100 },
- { "VE QY", RADEON_RV100 },
- { "VE QZ", RADEON_RV100 },
- { "M7 LW", RADEON_M7 },
- { "M7 LX", RADEON_M7 },
- { "M6 LY", RADEON_M6 },
- { "M6 LZ", RADEON_M6 },
- { "8500 QL", RADEON_R200 },
- { "8500 QN", RADEON_R200 },
- { "8500 QO", RADEON_R200 },
- { "8500 Ql", RADEON_R200 },
- { "8500 BB", RADEON_R200 },
- { "7500 QW", RADEON_RV200 },
- { "7500 QX", RADEON_RV200 },
- { "9000 Id", RADEON_RV250 },
- { "9000 Ie", RADEON_RV250 },
- { "9000 If", RADEON_RV250 },
- { "9000 Ig", RADEON_RV250 },
- { "M9 Ld", RADEON_M9 },
- { "M9 Le", RADEON_M9 },
- { "M9 Lf", RADEON_M9 },
- { "M9 Lg", RADEON_M9 },
- { "9700 ND", RADEON_R300 },
- { "9700 NE", RADEON_R300 },
- { "9700 NF", RADEON_R300 },
- { "9700 NG", RADEON_R300 },
- { "9100 QM", RADEON_R200 }
-};
-
-
-enum radeon_montype
-{
- MT_NONE,
- MT_CRT, /* CRT */
- MT_LCD, /* LCD */
- MT_DFP, /* DVI */
- MT_CTV, /* composite TV */
- MT_STV /* S-Video out */
-};
-
-
-static struct pci_device_id radeonfb_pci_table[] = {
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QD},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QE},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QF},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QG, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QG},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QY},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QZ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QZ},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_LW, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_LW},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_LX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_LX},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_LY, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_LY},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_LZ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_LZ},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QL},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QN, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QN},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QO},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ql, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ql},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_BB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_BB},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QW, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QW},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QX},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Id},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ie, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ie},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_If, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_If},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ig, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ig},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ya, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ya},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Yd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Yd},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ld, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ld},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Le, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Le},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Lf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Lf},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Lg, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Lg},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_ND, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_ND},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_NE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_NE},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_NF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_NF},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_NG, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_NG},
- { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QM},
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, radeonfb_pci_table);
-
-
-typedef struct {
- u16 reg;
- u32 val;
-} reg_val;
-
-
-/* these common regs are cleared before mode setting so they do not
- * interfere with anything
- */
-static reg_val common_regs[] = {
- { OVR_CLR, 0 },
- { OVR_WID_LEFT_RIGHT, 0 },
- { OVR_WID_TOP_BOTTOM, 0 },
- { OV0_SCALE_CNTL, 0 },
- { SUBPIC_CNTL, 0 },
- { VIPH_CONTROL, 0 },
- { I2C_CNTL_1, 0 },
- { GEN_INT_CNTL, 0 },
- { CAP0_TRIG_CNTL, 0 },
-};
-
-static reg_val common_regs_m6[] = {
- { OVR_CLR, 0 },
- { OVR_WID_LEFT_RIGHT, 0 },
- { OVR_WID_TOP_BOTTOM, 0 },
- { OV0_SCALE_CNTL, 0 },
- { SUBPIC_CNTL, 0 },
- { GEN_INT_CNTL, 0 },
- { CAP0_TRIG_CNTL, 0 }
-};
-
-typedef struct {
- u8 clock_chip_type;
- u8 struct_size;
- u8 accelerator_entry;
- u8 VGA_entry;
- u16 VGA_table_offset;
- u16 POST_table_offset;
- u16 XCLK;
- u16 MCLK;
- u8 num_PLL_blocks;
- u8 size_PLL_blocks;
- u16 PCLK_ref_freq;
- u16 PCLK_ref_divider;
- u32 PCLK_min_freq;
- u32 PCLK_max_freq;
- u16 MCLK_ref_freq;
- u16 MCLK_ref_divider;
- u32 MCLK_min_freq;
- u32 MCLK_max_freq;
- u16 XCLK_ref_freq;
- u16 XCLK_ref_divider;
- u32 XCLK_min_freq;
- u32 XCLK_max_freq;
-} __attribute__ ((packed)) PLL_BLOCK;
-
-
-struct pll_info {
- int ppll_max;
- int ppll_min;
- int xclk;
- int ref_div;
- int ref_clk;
-};
-
-
-struct ram_info {
- int ml;
- int mb;
- int trcd;
- int trp;
- int twr;
- int cl;
- int tr2w;
- int loop_latency;
- int rloop;
-};
-
-
-struct radeon_regs {
- /* CRTC regs */
- u32 crtc_h_total_disp;
- u32 crtc_h_sync_strt_wid;
- u32 crtc_v_total_disp;
- u32 crtc_v_sync_strt_wid;
- u32 crtc_pitch;
- u32 crtc_gen_cntl;
- u32 crtc_ext_cntl;
- u32 dac_cntl;
-
- u32 flags;
- u32 pix_clock;
- int xres, yres;
-
- /* DDA regs */
- u32 dda_config;
- u32 dda_on_off;
-
- /* PLL regs */
- u32 ppll_div_3;
- u32 ppll_ref_div;
- u32 vclk_ecp_cntl;
-
- /* Flat panel regs */
- u32 fp_crtc_h_total_disp;
- u32 fp_crtc_v_total_disp;
- u32 fp_gen_cntl;
- u32 fp_h_sync_strt_wid;
- u32 fp_horz_stretch;
- u32 fp_panel_cntl;
- u32 fp_v_sync_strt_wid;
- u32 fp_vert_stretch;
- u32 lvds_gen_cntl;
- u32 lvds_pll_cntl;
- u32 tmds_crc;
- u32 tmds_transmitter_cntl;
-
-#if defined(__BIG_ENDIAN)
- u32 surface_cntl;
-#endif
-};
-
-
-struct radeonfb_info {
- struct fb_info info;
-
- struct radeon_regs state;
- struct radeon_regs init_state;
-
- char name[32];
- char ram_type[12];
-
- unsigned long mmio_base_phys;
- unsigned long fb_base_phys;
-
- void __iomem *mmio_base;
- void __iomem *fb_base;
-
- struct pci_dev *pdev;
-
- unsigned char *EDID;
- unsigned char __iomem *bios_seg;
-
- u32 pseudo_palette[17];
- struct { u8 red, green, blue, pad; } palette[256];
-
- int chipset;
- unsigned char arch;
- int video_ram;
- u8 rev;
- int pitch, bpp, depth;
- int xres, yres, pixclock;
- int xres_virtual, yres_virtual;
- u32 accel_flags;
-
- int use_default_var;
- int got_dfpinfo;
-
- int hasCRTC2;
- int crtDisp_type;
- int dviDisp_type;
-
- int panel_xres, panel_yres;
- int clock;
- int hOver_plus, hSync_width, hblank;
- int vOver_plus, vSync_width, vblank;
- int hAct_high, vAct_high, interlaced;
- int synct, misc;
-
- u32 dp_gui_master_cntl;
-
- struct pll_info pll;
- int pll_output_freq, post_div, fb_div;
-
- struct ram_info ram;
-
- int mtrr_hdl;
-
-#ifdef CONFIG_PMAC_PBOOK
- int pm_reg;
- u32 save_regs[64];
- u32 mdll, mdll2;
-#endif /* CONFIG_PMAC_PBOOK */
- int asleep;
-
- struct radeonfb_info *next;
-};
-
-
-static struct fb_var_screeninfo radeonfb_default_var = {
- 640, 480, 640, 480, 0, 0, 8, 0,
- {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0},
- 0, 0, -1, -1, 0, 39721, 40, 24, 32, 11, 96, 2,
- 0, FB_VMODE_NONINTERLACED
-};
-
-/*
- * IO macros
- */
-
-#define INREG8(addr) readb((rinfo->mmio_base)+addr)
-#define OUTREG8(addr,val) writeb(val, (rinfo->mmio_base)+addr)
-#define INREG(addr) readl((rinfo->mmio_base)+addr)
-#define OUTREG(addr,val) writel(val, (rinfo->mmio_base)+addr)
-
-#define OUTPLL(addr,val) \
- do { \
- OUTREG8(CLOCK_CNTL_INDEX, (addr & 0x0000003f) | 0x00000080); \
- OUTREG(CLOCK_CNTL_DATA, val); \
- } while(0)
-
-#define OUTPLLP(addr,val,mask) \
- do { \
- unsigned int _tmp = INPLL(addr); \
- _tmp &= (mask); \
- _tmp |= (val); \
- OUTPLL(addr, _tmp); \
- } while (0)
-
-#define OUTREGP(addr,val,mask) \
- do { \
- unsigned int _tmp = INREG(addr); \
- _tmp &= (mask); \
- _tmp |= (val); \
- OUTREG(addr, _tmp); \
- } while (0)
-
-
-static __inline__ u32 _INPLL(struct radeonfb_info *rinfo, u32 addr)
-{
- OUTREG8(CLOCK_CNTL_INDEX, addr & 0x0000003f);
- return (INREG(CLOCK_CNTL_DATA));
-}
-
-#define INPLL(addr) _INPLL(rinfo, addr)
-
-#define PRIMARY_MONITOR(rinfo) ((rinfo->dviDisp_type != MT_NONE) && \
- (rinfo->dviDisp_type != MT_STV) && \
- (rinfo->dviDisp_type != MT_CTV) ? \
- rinfo->dviDisp_type : rinfo->crtDisp_type)
-
-static char *GET_MON_NAME(int type)
-{
- char *pret = NULL;
-
- switch (type) {
- case MT_NONE:
- pret = "no";
- break;
- case MT_CRT:
- pret = "CRT";
- break;
- case MT_DFP:
- pret = "DFP";
- break;
- case MT_LCD:
- pret = "LCD";
- break;
- case MT_CTV:
- pret = "CTV";
- break;
- case MT_STV:
- pret = "STV";
- break;
- }
-
- return pret;
-}
-
-
-/*
- * 2D engine routines
- */
-
-static __inline__ void radeon_engine_flush (struct radeonfb_info *rinfo)
-{
- int i;
-
- /* initiate flush */
- OUTREGP(RB2D_DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL,
- ~RB2D_DC_FLUSH_ALL);
-
- for (i=0; i < 2000000; i++) {
- if (!(INREG(RB2D_DSTCACHE_CTLSTAT) & RB2D_DC_BUSY))
- break;
- }
-}
-
-
-static __inline__ void _radeon_fifo_wait (struct radeonfb_info *rinfo, int entries)
-{
- int i;
-
- for (i=0; i<2000000; i++)
- if ((INREG(RBBM_STATUS) & 0x7f) >= entries)
- return;
-}
-
-
-static __inline__ void _radeon_engine_idle (struct radeonfb_info *rinfo)
-{
- int i;
-
- /* ensure FIFO is empty before waiting for idle */
- _radeon_fifo_wait (rinfo, 64);
-
- for (i=0; i<2000000; i++) {
- if (((INREG(RBBM_STATUS) & GUI_ACTIVE)) == 0) {
- radeon_engine_flush (rinfo);
- return;
- }
- }
-}
-
-
-#define radeon_engine_idle() _radeon_engine_idle(rinfo)
-#define radeon_fifo_wait(entries) _radeon_fifo_wait(rinfo,entries)
-
-
-
-/*
- * helper routines
- */
-
-static __inline__ u32 radeon_get_dstbpp(u16 depth)
-{
- switch (depth) {
- case 8:
- return DST_8BPP;
- case 15:
- return DST_15BPP;
- case 16:
- return DST_16BPP;
- case 32:
- return DST_32BPP;
- default:
- return 0;
- }
-}
-
-
-static inline int var_to_depth(const struct fb_var_screeninfo *var)
-{
- if (var->bits_per_pixel != 16)
- return var->bits_per_pixel;
- return (var->green.length == 6) ? 16 : 15;
-}
-
-
-static void _radeon_engine_reset(struct radeonfb_info *rinfo)
-{
- u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset;
-
- radeon_engine_flush (rinfo);
-
- clock_cntl_index = INREG(CLOCK_CNTL_INDEX);
- mclk_cntl = INPLL(MCLK_CNTL);
-
- OUTPLL(MCLK_CNTL, (mclk_cntl |
- FORCEON_MCLKA |
- FORCEON_MCLKB |
- FORCEON_YCLKA |
- FORCEON_YCLKB |
- FORCEON_MC |
- FORCEON_AIC));
- rbbm_soft_reset = INREG(RBBM_SOFT_RESET);
-
- OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset |
- SOFT_RESET_CP |
- SOFT_RESET_HI |
- SOFT_RESET_SE |
- SOFT_RESET_RE |
- SOFT_RESET_PP |
- SOFT_RESET_E2 |
- SOFT_RESET_RB);
- INREG(RBBM_SOFT_RESET);
- OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset & (u32)
- ~(SOFT_RESET_CP |
- SOFT_RESET_HI |
- SOFT_RESET_SE |
- SOFT_RESET_RE |
- SOFT_RESET_PP |
- SOFT_RESET_E2 |
- SOFT_RESET_RB));
- INREG(RBBM_SOFT_RESET);
-
- OUTPLL(MCLK_CNTL, mclk_cntl);
- OUTREG(CLOCK_CNTL_INDEX, clock_cntl_index);
- OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset);
-
- return;
-}
-
-#define radeon_engine_reset() _radeon_engine_reset(rinfo)
-
-
-static __inline__ int round_div(int num, int den)
-{
- return (num + (den / 2)) / den;
-}
-
-
-
-static __inline__ int min_bits_req(int val)
-{
- int bits_req = 0;
-
- if (val == 0)
- bits_req = 1;
-
- while (val) {
- val >>= 1;
- bits_req++;
- }
-
- return (bits_req);
-}
-
-
-static __inline__ int _max(int val1, int val2)
-{
- if (val1 >= val2)
- return val1;
- else
- return val2;
-}
-
-
-
-/*
- * globals
- */
-
-#ifndef MODULE
-static char *mode_option;
-#endif
-
-static char noaccel = 0;
-static char mirror = 0;
-static int panel_yres = 0;
-static char force_dfp = 0;
-static struct radeonfb_info *board_list = NULL;
-static char nomtrr = 0;
-
-/*
- * prototypes
- */
-
-static void radeon_save_state (struct radeonfb_info *rinfo,
- struct radeon_regs *save);
-static void radeon_engine_init (struct radeonfb_info *rinfo);
-static void radeon_write_mode (struct radeonfb_info *rinfo,
- struct radeon_regs *mode);
-static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo);
-static int __devinit radeon_init_disp (struct radeonfb_info *rinfo);
-static int radeon_init_disp_var (struct radeonfb_info *rinfo, struct fb_var_screeninfo *var);
-static void __iomem *radeon_find_rom(struct radeonfb_info *rinfo);
-static void radeon_get_pllinfo(struct radeonfb_info *rinfo, void __iomem *bios_seg);
-static void radeon_get_moninfo (struct radeonfb_info *rinfo);
-static int radeon_get_dfpinfo (struct radeonfb_info *rinfo);
-static int radeon_get_dfpinfo_BIOS(struct radeonfb_info *rinfo);
-static void radeon_get_EDID(struct radeonfb_info *rinfo);
-static int radeon_dfp_parse_EDID(struct radeonfb_info *rinfo);
-static void radeon_update_default_var(struct radeonfb_info *rinfo);
-
-#ifdef CONFIG_PPC_OF
-
-static int radeon_read_OF (struct radeonfb_info *rinfo);
-static int radeon_get_EDID_OF(struct radeonfb_info *rinfo);
-extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev);
-
-#ifdef CONFIG_PMAC_PBOOK
-int radeon_sleep_notify(struct pmu_sleep_notifier *self, int when);
-static struct pmu_sleep_notifier radeon_sleep_notifier = {
- radeon_sleep_notify, SLEEP_LEVEL_VIDEO,
-};
-#endif /* CONFIG_PMAC_PBOOK */
-#ifdef CONFIG_PMAC_BACKLIGHT
-static int radeon_set_backlight_enable(int on, int level, void *data);
-static int radeon_set_backlight_level(int level, void *data);
-static struct backlight_controller radeon_backlight_controller = {
- radeon_set_backlight_enable,
- radeon_set_backlight_level
-};
-#endif /* CONFIG_PMAC_BACKLIGHT */
-
-#endif /* CONFIG_PPC_OF */
-
-
-static void __iomem *radeon_find_rom(struct radeonfb_info *rinfo)
-{
-#if defined(__i386__)
- u32 segstart;
- char __iomem *rom_base;
- char __iomem *rom;
- int stage;
- int i,j;
- char aty_rom_sig[] = "761295520";
- char *radeon_sig[] = {
- "RG6",
- "RADEON"
- };
-
- for(segstart=0x000c0000; segstart<0x000f0000; segstart+=0x00001000) {
-
- stage = 1;
-
- rom_base = ioremap(segstart, 0x1000);
-
- if ((*rom_base == 0x55) && (((*(rom_base + 1)) & 0xff) == 0xaa))
- stage = 2;
-
-
- if (stage != 2) {
- iounmap(rom_base);
- continue;
- }
-
- rom = rom_base;
-
- for (i = 0; (i < 128 - strlen(aty_rom_sig)) && (stage != 3); i++) {
- if (aty_rom_sig[0] == *rom)
- if (strncmp(aty_rom_sig, rom,
- strlen(aty_rom_sig)) == 0)
- stage = 3;
- rom++;
- }
- if (stage != 3) {
- iounmap(rom_base);
- continue;
- }
- rom = rom_base;
-
- for (i = 0; (i < 512) && (stage != 4); i++) {
- for (j = 0; j < ARRAY_SIZE(radeon_sig); j++) {
- if (radeon_sig[j][0] == *rom)
- if (strncmp(radeon_sig[j], rom,
- strlen(radeon_sig[j])) == 0) {
- stage = 4;
- break;
- }
- }
- rom++;
- }
- if (stage != 4) {
- iounmap(rom_base);
- continue;
- }
-
- return rom_base;
- }
-#endif
- return NULL;
-}
-
-
-
-
-static void radeon_get_pllinfo(struct radeonfb_info *rinfo, void __iomem *bios_seg)
-{
- void __iomem *bios_header;
- void __iomem *header_ptr;
- u16 bios_header_offset, pll_info_offset;
- PLL_BLOCK pll;
-
- if (bios_seg) {
- bios_header = bios_seg + 0x48L;
- header_ptr = bios_header;
-
- bios_header_offset = readw(header_ptr);
- bios_header = bios_seg + bios_header_offset;
- bios_header += 0x30;
-
- header_ptr = bios_header;
- pll_info_offset = readw(header_ptr);
- header_ptr = bios_seg + pll_info_offset;
-
- memcpy_fromio(&pll, header_ptr, 50);
-
- rinfo->pll.xclk = (u32)pll.XCLK;
- rinfo->pll.ref_clk = (u32)pll.PCLK_ref_freq;
- rinfo->pll.ref_div = (u32)pll.PCLK_ref_divider;
- rinfo->pll.ppll_min = pll.PCLK_min_freq;
- rinfo->pll.ppll_max = pll.PCLK_max_freq;
-
- printk("radeonfb: ref_clk=%d, ref_div=%d, xclk=%d from BIOS\n",
- rinfo->pll.ref_clk, rinfo->pll.ref_div, rinfo->pll.xclk);
- } else {
-#ifdef CONFIG_PPC_OF
- if (radeon_read_OF(rinfo)) {
- unsigned int tmp, Nx, M, ref_div, xclk;
-
- tmp = INPLL(M_SPLL_REF_FB_DIV);
- ref_div = INPLL(PPLL_REF_DIV) & 0x3ff;
-
- Nx = (tmp & 0xff00) >> 8;
- M = (tmp & 0xff);
- xclk = ((((2 * Nx * rinfo->pll.ref_clk) + (M)) /
- (2 * M)));
-
- rinfo->pll.xclk = xclk;
- rinfo->pll.ref_div = ref_div;
- rinfo->pll.ppll_min = 12000;
- rinfo->pll.ppll_max = 35000;
-
- printk("radeonfb: ref_clk=%d, ref_div=%d, xclk=%d from OF\n",
- rinfo->pll.ref_clk, rinfo->pll.ref_div, rinfo->pll.xclk);
-
- return;
- }
-#endif
- /* no BIOS or BIOS not found, use defaults */
- switch (rinfo->chipset) {
- case PCI_DEVICE_ID_ATI_RADEON_QW:
- case PCI_DEVICE_ID_ATI_RADEON_QX:
- rinfo->pll.ppll_max = 35000;
- rinfo->pll.ppll_min = 12000;
- rinfo->pll.xclk = 23000;
- rinfo->pll.ref_div = 12;
- rinfo->pll.ref_clk = 2700;
- break;
- case PCI_DEVICE_ID_ATI_RADEON_QL:
- case PCI_DEVICE_ID_ATI_RADEON_QN:
- case PCI_DEVICE_ID_ATI_RADEON_QO:
- case PCI_DEVICE_ID_ATI_RADEON_Ql:
- case PCI_DEVICE_ID_ATI_RADEON_BB:
- rinfo->pll.ppll_max = 35000;
- rinfo->pll.ppll_min = 12000;
- rinfo->pll.xclk = 27500;
- rinfo->pll.ref_div = 12;
- rinfo->pll.ref_clk = 2700;
- break;
- case PCI_DEVICE_ID_ATI_RADEON_Id:
- case PCI_DEVICE_ID_ATI_RADEON_Ie:
- case PCI_DEVICE_ID_ATI_RADEON_If:
- case PCI_DEVICE_ID_ATI_RADEON_Ig:
- rinfo->pll.ppll_max = 35000;
- rinfo->pll.ppll_min = 12000;
- rinfo->pll.xclk = 25000;
- rinfo->pll.ref_div = 12;
- rinfo->pll.ref_clk = 2700;
- break;
- case PCI_DEVICE_ID_ATI_RADEON_ND:
- case PCI_DEVICE_ID_ATI_RADEON_NE:
- case PCI_DEVICE_ID_ATI_RADEON_NF:
- case PCI_DEVICE_ID_ATI_RADEON_NG:
- rinfo->pll.ppll_max = 40000;
- rinfo->pll.ppll_min = 20000;
- rinfo->pll.xclk = 27000;
- rinfo->pll.ref_div = 12;
- rinfo->pll.ref_clk = 2700;
- break;
- case PCI_DEVICE_ID_ATI_RADEON_QD:
- case PCI_DEVICE_ID_ATI_RADEON_QE:
- case PCI_DEVICE_ID_ATI_RADEON_QF:
- case PCI_DEVICE_ID_ATI_RADEON_QG:
- default:
- rinfo->pll.ppll_max = 35000;
- rinfo->pll.ppll_min = 12000;
- rinfo->pll.xclk = 16600;
- rinfo->pll.ref_div = 67;
- rinfo->pll.ref_clk = 2700;
- break;
- }
-
- printk("radeonfb: ref_clk=%d, ref_div=%d, xclk=%d defaults\n",
- rinfo->pll.ref_clk, rinfo->pll.ref_div, rinfo->pll.xclk);
- }
-}
-
-
-static void radeon_get_moninfo (struct radeonfb_info *rinfo)
-{
- unsigned int tmp;
-
- if (force_dfp) {
- rinfo->dviDisp_type = MT_DFP;
- return;
- }
-
- tmp = INREG(BIOS_4_SCRATCH);
- printk(KERN_DEBUG "radeon_get_moninfo: bios 4 scratch = %x\n", tmp);
-
- if (rinfo->hasCRTC2) {
- /* primary DVI port */
- if (tmp & 0x08)
- rinfo->dviDisp_type = MT_DFP;
- else if (tmp & 0x4)
- rinfo->dviDisp_type = MT_LCD;
- else if (tmp & 0x200)
- rinfo->dviDisp_type = MT_CRT;
- else if (tmp & 0x10)
- rinfo->dviDisp_type = MT_CTV;
- else if (tmp & 0x20)
- rinfo->dviDisp_type = MT_STV;
-
- /* secondary CRT port */
- if (tmp & 0x2)
- rinfo->crtDisp_type = MT_CRT;
- else if (tmp & 0x800)
- rinfo->crtDisp_type = MT_DFP;
- else if (tmp & 0x400)
- rinfo->crtDisp_type = MT_LCD;
- else if (tmp & 0x1000)
- rinfo->crtDisp_type = MT_CTV;
- else if (tmp & 0x2000)
- rinfo->crtDisp_type = MT_STV;
- } else {
- rinfo->dviDisp_type = MT_NONE;
-
- tmp = INREG(FP_GEN_CNTL);
-
- if (tmp & FP_EN_TMDS)
- rinfo->crtDisp_type = MT_DFP;
- else
- rinfo->crtDisp_type = MT_CRT;
- }
-}
-
-
-
-static void radeon_get_EDID(struct radeonfb_info *rinfo)
-{
-#ifdef CONFIG_PPC_OF
- if (!radeon_get_EDID_OF(rinfo))
- RTRACE("radeonfb: could not retrieve EDID from OF\n");
-#else
- /* XXX use other methods later */
-#endif
-}
-
-
-#ifdef CONFIG_PPC_OF
-static int radeon_get_EDID_OF(struct radeonfb_info *rinfo)
-{
- struct device_node *dp;
- unsigned char *pedid = NULL;
- static char *propnames[] = { "DFP,EDID", "LCD,EDID", "EDID", "EDID1", NULL };
- int i;
-
- dp = pci_device_to_OF_node(rinfo->pdev);
- while (dp != NULL) {
- for (i = 0; propnames[i] != NULL; ++i) {
- pedid = (unsigned char *)
- get_property(dp, propnames[i], NULL);
- if (pedid != NULL) {
- rinfo->EDID = pedid;
- return 1;
- }
- }
- dp = dp->child;
- }
- return 0;
-}
-#endif /* CONFIG_PPC_OF */
-
-
-static int radeon_dfp_parse_EDID(struct radeonfb_info *rinfo)
-{
- unsigned char *block = rinfo->EDID;
-
- if (!block)
- return 0;
-
- /* jump to the detailed timing block section */
- block += 54;
-
- rinfo->clock = (block[0] + (block[1] << 8));
- rinfo->panel_xres = (block[2] + ((block[4] & 0xf0) << 4));
- rinfo->hblank = (block[3] + ((block[4] & 0x0f) << 8));
- rinfo->panel_yres = (block[5] + ((block[7] & 0xf0) << 4));
- rinfo->vblank = (block[6] + ((block[7] & 0x0f) << 8));
- rinfo->hOver_plus = (block[8] + ((block[11] & 0xc0) << 2));
- rinfo->hSync_width = (block[9] + ((block[11] & 0x30) << 4));
- rinfo->vOver_plus = ((block[10] >> 4) + ((block[11] & 0x0c) << 2));
- rinfo->vSync_width = ((block[10] & 0x0f) + ((block[11] & 0x03) << 4));
- rinfo->interlaced = ((block[17] & 0x80) >> 7);
- rinfo->synct = ((block[17] & 0x18) >> 3);
- rinfo->misc = ((block[17] & 0x06) >> 1);
- rinfo->hAct_high = rinfo->vAct_high = 0;
- if (rinfo->synct == 3) {
- if (rinfo->misc & 2)
- rinfo->hAct_high = 1;
- if (rinfo->misc & 1)
- rinfo->vAct_high = 1;
- }
-
- printk("radeonfb: detected DFP panel size from EDID: %dx%d\n",
- rinfo->panel_xres, rinfo->panel_yres);
-
- rinfo->got_dfpinfo = 1;
-
- return 1;
-}
-
-
-static void radeon_update_default_var(struct radeonfb_info *rinfo)
-{
- struct fb_var_screeninfo *var = &radeonfb_default_var;
-
- var->xres = rinfo->panel_xres;
- var->yres = rinfo->panel_yres;
- var->xres_virtual = rinfo->panel_xres;
- var->yres_virtual = rinfo->panel_yres;
- var->xoffset = var->yoffset = 0;
- var->bits_per_pixel = 8;
- var->pixclock = 100000000 / rinfo->clock;
- var->left_margin = (rinfo->hblank - rinfo->hOver_plus - rinfo->hSync_width);
- var->right_margin = rinfo->hOver_plus;
- var->upper_margin = (rinfo->vblank - rinfo->vOver_plus - rinfo->vSync_width);
- var->lower_margin = rinfo->vOver_plus;
- var->hsync_len = rinfo->hSync_width;
- var->vsync_len = rinfo->vSync_width;
- var->sync = 0;
- if (rinfo->synct == 3) {
- if (rinfo->hAct_high)
- var->sync |= FB_SYNC_HOR_HIGH_ACT;
- if (rinfo->vAct_high)
- var->sync |= FB_SYNC_VERT_HIGH_ACT;
- }
-
- var->vmode = 0;
- if (rinfo->interlaced)
- var->vmode |= FB_VMODE_INTERLACED;
-
- rinfo->use_default_var = 1;
-}
-
-
-static int radeon_get_dfpinfo_BIOS(struct radeonfb_info *rinfo)
-{
- char __iomem *fpbiosstart, *tmp, *tmp0;
- char stmp[30];
- int i;
-
- if (!rinfo->bios_seg)
- return 0;
-
- if (!(fpbiosstart = rinfo->bios_seg + readw(rinfo->bios_seg + 0x48))) {
- printk("radeonfb: Failed to detect DFP panel info using BIOS\n");
- return 0;
- }
-
- if (!(tmp = rinfo->bios_seg + readw(fpbiosstart + 0x40))) {
- printk("radeonfb: Failed to detect DFP panel info using BIOS\n");
- return 0;
- }
-
- for(i=0; i<24; i++)
- stmp[i] = readb(tmp+i+1);
- stmp[24] = 0;
- printk("radeonfb: panel ID string: %s\n", stmp);
- rinfo->panel_xres = readw(tmp + 25);
- rinfo->panel_yres = readw(tmp + 27);
- printk("radeonfb: detected DFP panel size from BIOS: %dx%d\n",
- rinfo->panel_xres, rinfo->panel_yres);
-
- for(i=0; i<32; i++) {
- tmp0 = rinfo->bios_seg + readw(tmp+64+i*2);
- if (tmp0 == 0)
- break;
- if ((readw(tmp0) == rinfo->panel_xres) &&
- (readw(tmp0+2) == rinfo->panel_yres)) {
- rinfo->hblank = (readw(tmp0+17) - readw(tmp0+19)) * 8;
- rinfo->hOver_plus = ((readw(tmp0+21) - readw(tmp0+19) -1) * 8) & 0x7fff;
- rinfo->hSync_width = readb(tmp0+23) * 8;
- rinfo->vblank = readw(tmp0+24) - readw(tmp0+26);
- rinfo->vOver_plus = (readw(tmp0+28) & 0x7ff) - readw(tmp0+26);
- rinfo->vSync_width = (readw(tmp0+28) & 0xf800) >> 11;
- rinfo->clock = readw(tmp0+9);
-
- rinfo->got_dfpinfo = 1;
- return 1;
- }
- }
-
- return 0;
-}
-
-
-
-static int radeon_get_dfpinfo (struct radeonfb_info *rinfo)
-{
- unsigned int tmp;
- unsigned short a, b;
-
- if (radeon_get_dfpinfo_BIOS(rinfo))
- radeon_update_default_var(rinfo);
-
- if (radeon_dfp_parse_EDID(rinfo))
- radeon_update_default_var(rinfo);
-
- if (!rinfo->got_dfpinfo) {
- /*
-		 * it seems everything else has failed, so we
-		 * resort to probing registers for our DFP info
- */
- if (panel_yres) {
- rinfo->panel_yres = panel_yres;
- } else {
- tmp = INREG(FP_VERT_STRETCH);
- tmp &= 0x00fff000;
- rinfo->panel_yres = (unsigned short)(tmp >> 0x0c) + 1;
- }
-
- switch (rinfo->panel_yres) {
- case 480:
- rinfo->panel_xres = 640;
- break;
- case 600:
- rinfo->panel_xres = 800;
- break;
- case 768:
-#if defined(__powerpc__)
- if (rinfo->dviDisp_type == MT_LCD)
- rinfo->panel_xres = 1152;
- else
-#endif
- rinfo->panel_xres = 1024;
- break;
- case 1024:
- rinfo->panel_xres = 1280;
- break;
- case 1050:
- rinfo->panel_xres = 1400;
- break;
- case 1200:
- rinfo->panel_xres = 1600;
- break;
- default:
- printk("radeonfb: Failed to detect DFP panel size\n");
- return 0;
- }
-
- printk("radeonfb: detected DFP panel size from registers: %dx%d\n",
- rinfo->panel_xres, rinfo->panel_yres);
-
- tmp = INREG(FP_CRTC_H_TOTAL_DISP);
- a = (tmp & FP_CRTC_H_TOTAL_MASK) + 4;
- b = (tmp & 0x01ff0000) >> FP_CRTC_H_DISP_SHIFT;
- rinfo->hblank = (a - b + 1) * 8;
-
- tmp = INREG(FP_H_SYNC_STRT_WID);
- rinfo->hOver_plus = (unsigned short) ((tmp & FP_H_SYNC_STRT_CHAR_MASK) >>
- FP_H_SYNC_STRT_CHAR_SHIFT) - b - 1;
- rinfo->hOver_plus *= 8;
- rinfo->hSync_width = (unsigned short) ((tmp & FP_H_SYNC_WID_MASK) >>
- FP_H_SYNC_WID_SHIFT);
- rinfo->hSync_width *= 8;
- tmp = INREG(FP_CRTC_V_TOTAL_DISP);
- a = (tmp & FP_CRTC_V_TOTAL_MASK) + 1;
- b = (tmp & FP_CRTC_V_DISP_MASK) >> FP_CRTC_V_DISP_SHIFT;
- rinfo->vblank = a - b /* + 24 */ ;
-
- tmp = INREG(FP_V_SYNC_STRT_WID);
- rinfo->vOver_plus = (unsigned short) (tmp & FP_V_SYNC_STRT_MASK)
- - b + 1;
- rinfo->vSync_width = (unsigned short) ((tmp & FP_V_SYNC_WID_MASK) >>
- FP_V_SYNC_WID_SHIFT);
-
- return 1;
- }
-
- return 1;
-}
-
-
-#ifdef CONFIG_PPC_OF
-static int radeon_read_OF (struct radeonfb_info *rinfo)
-{
- struct device_node *dp;
- unsigned int *xtal;
-
- dp = pci_device_to_OF_node(rinfo->pdev);
-
- xtal = (unsigned int *) get_property(dp, "ATY,RefCLK", NULL);
-
- rinfo->pll.ref_clk = *xtal / 10;
-
- if (*xtal)
- return 1;
- else
- return 0;
-}
-#endif
-
-
-static void radeon_engine_init (struct radeonfb_info *rinfo)
-{
- u32 temp;
-
- /* disable 3D engine */
- OUTREG(RB3D_CNTL, 0);
-
- radeon_engine_reset ();
-
- radeon_fifo_wait (1);
- OUTREG(RB2D_DSTCACHE_MODE, 0);
-
- radeon_fifo_wait (1);
- temp = INREG(DEFAULT_PITCH_OFFSET);
- OUTREG(DEFAULT_PITCH_OFFSET, ((temp & 0xc0000000) |
- (rinfo->pitch << 0x16)));
-
- radeon_fifo_wait (1);
- OUTREGP(DP_DATATYPE, 0, ~HOST_BIG_ENDIAN_EN);
-
- radeon_fifo_wait (1);
- OUTREG(DEFAULT_SC_BOTTOM_RIGHT, (DEFAULT_SC_RIGHT_MAX |
- DEFAULT_SC_BOTTOM_MAX));
-
- temp = radeon_get_dstbpp(rinfo->depth);
- rinfo->dp_gui_master_cntl = ((temp << 8) | GMC_CLR_CMP_CNTL_DIS);
- radeon_fifo_wait (1);
- OUTREG(DP_GUI_MASTER_CNTL, (rinfo->dp_gui_master_cntl |
- GMC_BRUSH_SOLID_COLOR |
- GMC_SRC_DATATYPE_COLOR));
-
- radeon_fifo_wait (7);
-
- /* clear line drawing regs */
- OUTREG(DST_LINE_START, 0);
- OUTREG(DST_LINE_END, 0);
-
- /* set brush color regs */
- OUTREG(DP_BRUSH_FRGD_CLR, 0xffffffff);
- OUTREG(DP_BRUSH_BKGD_CLR, 0x00000000);
-
- /* set source color regs */
- OUTREG(DP_SRC_FRGD_CLR, 0xffffffff);
- OUTREG(DP_SRC_BKGD_CLR, 0x00000000);
-
- /* default write mask */
- OUTREG(DP_WRITE_MSK, 0xffffffff);
-
- radeon_engine_idle ();
-}
-
-
-static int __devinit radeon_init_disp (struct radeonfb_info *rinfo)
-{
- struct fb_info *info = &rinfo->info;
- struct fb_var_screeninfo var;
-
- var = radeonfb_default_var;
- if ((radeon_init_disp_var(rinfo, &var)) < 0)
- return -1;
-
- rinfo->depth = var_to_depth(&var);
- rinfo->bpp = var.bits_per_pixel;
-
- info->var = var;
- fb_alloc_cmap(&info->cmap, 256, 0);
-
- var.activate = FB_ACTIVATE_NOW;
- return 0;
-}
-
-
-static int radeon_init_disp_var (struct radeonfb_info *rinfo,
- struct fb_var_screeninfo *var)
-{
-#ifndef MODULE
- if (mode_option)
- fb_find_mode (var, &rinfo->info, mode_option,
- NULL, 0, NULL, 8);
- else
-#endif
- if (rinfo->use_default_var)
-		/* We will use the modified default var */
- *var = radeonfb_default_var;
- else
-
- fb_find_mode (var, &rinfo->info, "640x480-8@60",
- NULL, 0, NULL, 0);
-
- if (noaccel)
- var->accel_flags &= ~FB_ACCELF_TEXT;
- else
- var->accel_flags |= FB_ACCELF_TEXT;
-
- return 0;
-}
-
-
-static int radeon_do_maximize(struct radeonfb_info *rinfo,
- struct fb_var_screeninfo *var,
- struct fb_var_screeninfo *v,
- int nom, int den)
-{
- static struct {
- int xres, yres;
- } modes[] = {
- {1600, 1280},
- {1280, 1024},
- {1024, 768},
- {800, 600},
- {640, 480},
- {-1, -1}
- };
- int i;
-
- /* use highest possible virtual resolution */
- if (v->xres_virtual == -1 && v->yres_virtual == -1) {
- printk("radeonfb: using max available virtual resolution\n");
- for (i=0; modes[i].xres != -1; i++) {
- if (modes[i].xres * nom / den * modes[i].yres <
- rinfo->video_ram / 2)
- break;
- }
- if (modes[i].xres == -1) {
- printk("radeonfb: could not find virtual resolution that fits into video memory!\n");
- return -EINVAL;
- }
- v->xres_virtual = modes[i].xres;
- v->yres_virtual = modes[i].yres;
-
- printk("radeonfb: virtual resolution set to max of %dx%d\n",
- v->xres_virtual, v->yres_virtual);
- } else if (v->xres_virtual == -1) {
- v->xres_virtual = (rinfo->video_ram * den /
- (nom * v->yres_virtual * 2)) & ~15;
- } else if (v->yres_virtual == -1) {
- v->xres_virtual = (v->xres_virtual + 15) & ~15;
- v->yres_virtual = rinfo->video_ram * den /
- (nom * v->xres_virtual *2);
- } else {
- if (v->xres_virtual * nom / den * v->yres_virtual >
- rinfo->video_ram) {
- return -EINVAL;
- }
- }
-
- if (v->xres_virtual * nom / den >= 8192) {
- v->xres_virtual = 8192 * den / nom - 16;
- }
-
- if (v->xres_virtual < v->xres)
- return -EINVAL;
-
- if (v->yres_virtual < v->yres)
- return -EINVAL;
-
- return 0;
-}
-
-
-static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info)
-{
- struct radeonfb_info *rinfo = (struct radeonfb_info *) info->par;
- struct fb_var_screeninfo v;
- int nom, den;
-
- memcpy (&v, var, sizeof (v));
-
- switch (v.bits_per_pixel) {
- case 0 ... 8:
- v.bits_per_pixel = 8;
- break;
- case 9 ... 16:
- v.bits_per_pixel = 16;
- break;
- case 17 ... 24:
-#if 0 /* Doesn't seem to work */
- v.bits_per_pixel = 24;
- break;
-#endif
- return -EINVAL;
- case 25 ... 32:
- v.bits_per_pixel = 32;
- break;
- default:
- return -EINVAL;
- }
-
- switch (var_to_depth(&v)) {
- case 8:
- nom = den = 1;
- v.red.offset = v.green.offset = v.blue.offset = 0;
- v.red.length = v.green.length = v.blue.length = 8;
- v.transp.offset = v.transp.length = 0;
- break;
- case 15:
- nom = 2;
- den = 1;
- v.red.offset = 10;
- v.green.offset = 5;
- v.blue.offset = 0;
- v.red.length = v.green.length = v.blue.length = 5;
- v.transp.offset = v.transp.length = 0;
- break;
- case 16:
- nom = 2;
- den = 1;
- v.red.offset = 11;
- v.green.offset = 5;
- v.blue.offset = 0;
- v.red.length = 5;
- v.green.length = 6;
- v.blue.length = 5;
- v.transp.offset = v.transp.length = 0;
- break;
- case 24:
- nom = 4;
- den = 1;
- v.red.offset = 16;
- v.green.offset = 8;
- v.blue.offset = 0;
- v.red.length = v.blue.length = v.green.length = 8;
- v.transp.offset = v.transp.length = 0;
- break;
- case 32:
- nom = 4;
- den = 1;
- v.red.offset = 16;
- v.green.offset = 8;
- v.blue.offset = 0;
- v.red.length = v.blue.length = v.green.length = 8;
- v.transp.offset = 24;
- v.transp.length = 8;
- break;
- default:
- printk ("radeonfb: mode %dx%dx%d rejected, color depth invalid\n",
- var->xres, var->yres, var->bits_per_pixel);
- return -EINVAL;
- }
-
- if (radeon_do_maximize(rinfo, var, &v, nom, den) < 0)
- return -EINVAL;
-
- if (v.xoffset < 0)
- v.xoffset = 0;
- if (v.yoffset < 0)
- v.yoffset = 0;
-
- if (v.xoffset > v.xres_virtual - v.xres)
- v.xoffset = v.xres_virtual - v.xres - 1;
-
- if (v.yoffset > v.yres_virtual - v.yres)
- v.yoffset = v.yres_virtual - v.yres - 1;
-
- v.red.msb_right = v.green.msb_right = v.blue.msb_right =
- v.transp.offset = v.transp.length =
- v.transp.msb_right = 0;
-
- if (noaccel)
- v.accel_flags = 0;
-
- memcpy(var, &v, sizeof(v));
-
- return 0;
-}
-
-
-static int radeonfb_pan_display (struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- struct radeonfb_info *rinfo = (struct radeonfb_info *) info;
-
- if ((var->xoffset + var->xres > var->xres_virtual)
- || (var->yoffset + var->yres > var->yres_virtual))
- return -EINVAL;
-
- if (rinfo->asleep)
- return 0;
-
- OUTREG(CRTC_OFFSET, ((var->yoffset * var->xres_virtual + var->xoffset)
- * var->bits_per_pixel / 8) & ~7);
- return 0;
-}
-
-
-static int radeonfb_ioctl (struct fb_info *info, unsigned int cmd,
- unsigned long arg)
-{
- struct radeonfb_info *rinfo = (struct radeonfb_info *) info;
- unsigned int tmp;
- u32 value = 0;
- int rc;
-
- switch (cmd) {
- /*
-	 * TODO: set mirror accordingly for non-Mobility chipsets with 2 CRTCs
- */
- case FBIO_RADEON_SET_MIRROR:
- switch (rinfo->arch) {
- case RADEON_R100:
- case RADEON_RV100:
- case RADEON_R200:
- case RADEON_RV200:
- case RADEON_RV250:
- case RADEON_R300:
- return -EINVAL;
- default:
- /* RADEON M6, RADEON_M7, RADEON_M9 */
- break;
- }
-
- rc = get_user(value, (__u32 __user *)arg);
-
- if (rc)
- return rc;
-
- if (value & 0x01) {
- tmp = INREG(LVDS_GEN_CNTL);
-
- tmp |= (LVDS_ON | LVDS_BLON);
- } else {
- tmp = INREG(LVDS_GEN_CNTL);
-
- tmp &= ~(LVDS_ON | LVDS_BLON);
- }
-
- OUTREG(LVDS_GEN_CNTL, tmp);
-
- if (value & 0x02) {
- tmp = INREG(CRTC_EXT_CNTL);
- tmp |= CRTC_CRT_ON;
-
- mirror = 1;
- } else {
- tmp = INREG(CRTC_EXT_CNTL);
- tmp &= ~CRTC_CRT_ON;
-
- mirror = 0;
- }
-
- OUTREG(CRTC_EXT_CNTL, tmp);
-
- break;
- case FBIO_RADEON_GET_MIRROR:
- switch (rinfo->arch) {
- case RADEON_R100:
- case RADEON_RV100:
- case RADEON_R200:
- case RADEON_RV200:
- case RADEON_RV250:
- case RADEON_R300:
- return -EINVAL;
- default:
- /* RADEON M6, RADEON_M7, RADEON_M9 */
- break;
- }
-
- tmp = INREG(LVDS_GEN_CNTL);
- if ((LVDS_ON | LVDS_BLON) & tmp)
- value |= 0x01;
-
- tmp = INREG(CRTC_EXT_CNTL);
- if (CRTC_CRT_ON & tmp)
- value |= 0x02;
-
- return put_user(value, (__u32 __user *)arg);
- default:
- return -EINVAL;
- }
-
- return -EINVAL;
-}
-
-
-static int radeonfb_blank (int blank, struct fb_info *info)
-{
- struct radeonfb_info *rinfo = (struct radeonfb_info *) info;
- u32 val = INREG(CRTC_EXT_CNTL);
- u32 val2 = INREG(LVDS_GEN_CNTL);
-
- if (rinfo->asleep)
- return 0;
-
-#ifdef CONFIG_PMAC_BACKLIGHT
- if (rinfo->dviDisp_type == MT_LCD && machine_is(powermac)) {
- set_backlight_enable(!blank);
- return 0;
- }
-#endif
-
- /* reset it */
- val &= ~(CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS |
- CRTC_VSYNC_DIS);
- val2 &= ~(LVDS_DISPLAY_DIS);
-
- switch (blank) {
- case FB_BLANK_UNBLANK:
- case FB_BLANK_NORMAL:
- break;
- case FB_BLANK_VSYNC_SUSPEND:
- val |= (CRTC_DISPLAY_DIS | CRTC_VSYNC_DIS);
- break;
- case FB_BLANK_HSYNC_SUSPEND:
- val |= (CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS);
- break;
- case FB_BLANK_POWERDOWN:
- val |= (CRTC_DISPLAY_DIS | CRTC_VSYNC_DIS |
- CRTC_HSYNC_DIS);
- val2 |= (LVDS_DISPLAY_DIS);
- break;
- }
-
- switch (rinfo->dviDisp_type) {
- case MT_LCD:
- OUTREG(LVDS_GEN_CNTL, val2);
- break;
- case MT_CRT:
- default:
- OUTREG(CRTC_EXT_CNTL, val);
- break;
- }
-
- /* let fbcon do a soft blank for us */
- return (blank == FB_BLANK_NORMAL) ? 1 : 0;
-}
-
-
-static int radeonfb_setcolreg (unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp, struct fb_info *info)
-{
- struct radeonfb_info *rinfo = (struct radeonfb_info *) info;
- u32 pindex, vclk_cntl;
- unsigned int i;
-
- if (regno > 255)
- return 1;
-
- red >>= 8;
- green >>= 8;
- blue >>= 8;
- rinfo->palette[regno].red = red;
- rinfo->palette[regno].green = green;
- rinfo->palette[regno].blue = blue;
-
- /* default */
- pindex = regno;
-
- if (!rinfo->asleep) {
- vclk_cntl = INPLL(VCLK_ECP_CNTL);
- OUTPLL(VCLK_ECP_CNTL, vclk_cntl & ~PIXCLK_DAC_ALWAYS_ONb);
-
- if (rinfo->bpp == 16) {
- pindex = regno * 8;
-
- if (rinfo->depth == 16 && regno > 63)
- return 1;
- if (rinfo->depth == 15 && regno > 31)
- return 1;
-
- /* For 565, the green component is mixed one order below */
- if (rinfo->depth == 16) {
- OUTREG(PALETTE_INDEX, pindex>>1);
- OUTREG(PALETTE_DATA, (rinfo->palette[regno>>1].red << 16) |
- (green << 8) | (rinfo->palette[regno>>1].blue));
- green = rinfo->palette[regno<<1].green;
- }
- }
-
- if (rinfo->depth != 16 || regno < 32) {
- OUTREG(PALETTE_INDEX, pindex);
- OUTREG(PALETTE_DATA, (red << 16) | (green << 8) | blue);
- }
-
- OUTPLL(VCLK_ECP_CNTL, vclk_cntl);
- }
- if (regno < 16) {
- switch (rinfo->depth) {
- case 15:
- ((u16 *) (info->pseudo_palette))[regno] =
- (regno << 10) | (regno << 5) | regno;
- break;
- case 16:
- ((u16 *) (info->pseudo_palette))[regno] =
- (regno << 11) | (regno << 6) | regno;
- break;
- case 24:
- ((u32 *) (info->pseudo_palette))[regno] =
- (regno << 16) | (regno << 8) | regno;
- break;
- case 32:
- i = (regno << 8) | regno;
- ((u32 *) (info->pseudo_palette))[regno] =
- (i << 16) | i;
- break;
- }
- }
- return 0;
-}
-
-
-
-static void radeon_save_state (struct radeonfb_info *rinfo,
- struct radeon_regs *save)
-{
- /* CRTC regs */
- save->crtc_gen_cntl = INREG(CRTC_GEN_CNTL);
- save->crtc_ext_cntl = INREG(CRTC_EXT_CNTL);
- save->dac_cntl = INREG(DAC_CNTL);
- save->crtc_h_total_disp = INREG(CRTC_H_TOTAL_DISP);
- save->crtc_h_sync_strt_wid = INREG(CRTC_H_SYNC_STRT_WID);
- save->crtc_v_total_disp = INREG(CRTC_V_TOTAL_DISP);
- save->crtc_v_sync_strt_wid = INREG(CRTC_V_SYNC_STRT_WID);
- save->crtc_pitch = INREG(CRTC_PITCH);
-#if defined(__BIG_ENDIAN)
- save->surface_cntl = INREG(SURFACE_CNTL);
-#endif
-
- /* FP regs */
- save->fp_crtc_h_total_disp = INREG(FP_CRTC_H_TOTAL_DISP);
- save->fp_crtc_v_total_disp = INREG(FP_CRTC_V_TOTAL_DISP);
- save->fp_gen_cntl = INREG(FP_GEN_CNTL);
- save->fp_h_sync_strt_wid = INREG(FP_H_SYNC_STRT_WID);
- save->fp_horz_stretch = INREG(FP_HORZ_STRETCH);
- save->fp_v_sync_strt_wid = INREG(FP_V_SYNC_STRT_WID);
- save->fp_vert_stretch = INREG(FP_VERT_STRETCH);
- save->lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
- save->lvds_pll_cntl = INREG(LVDS_PLL_CNTL);
- save->tmds_crc = INREG(TMDS_CRC);
- save->tmds_transmitter_cntl = INREG(TMDS_TRANSMITTER_CNTL);
- save->vclk_ecp_cntl = INPLL(VCLK_ECP_CNTL);
-}
-
-
-
-static int radeonfb_set_par (struct fb_info *info)
-{
- struct radeonfb_info *rinfo = (struct radeonfb_info *)info->par;
- struct fb_var_screeninfo *mode = &info->var;
- struct radeon_regs newmode;
- int hTotal, vTotal, hSyncStart, hSyncEnd,
- hSyncPol, vSyncStart, vSyncEnd, vSyncPol, cSync;
- u8 hsync_adj_tab[] = {0, 0x12, 9, 9, 6, 5};
- u8 hsync_fudge_fp[] = {2, 2, 0, 0, 5, 5};
- u32 dotClock = 1000000000 / mode->pixclock,
- sync, h_sync_pol, v_sync_pol;
- int freq = dotClock / 10; /* x 100 */
- int xclk_freq, vclk_freq, xclk_per_trans, xclk_per_trans_precise;
- int useable_precision, roff, ron;
- int min_bits, format = 0;
- int hsync_start, hsync_fudge, bytpp, hsync_wid, vsync_wid;
- int primary_mon = PRIMARY_MONITOR(rinfo);
- int depth = var_to_depth(mode);
- int accel = (mode->accel_flags & FB_ACCELF_TEXT) != 0;
-
- rinfo->xres = mode->xres;
- rinfo->yres = mode->yres;
- rinfo->xres_virtual = mode->xres_virtual;
- rinfo->yres_virtual = mode->yres_virtual;
- rinfo->pixclock = mode->pixclock;
-
- hSyncStart = mode->xres + mode->right_margin;
- hSyncEnd = hSyncStart + mode->hsync_len;
- hTotal = hSyncEnd + mode->left_margin;
-
- vSyncStart = mode->yres + mode->lower_margin;
- vSyncEnd = vSyncStart + mode->vsync_len;
- vTotal = vSyncEnd + mode->upper_margin;
-
- if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
- if (rinfo->panel_xres < mode->xres)
- rinfo->xres = mode->xres = rinfo->panel_xres;
- if (rinfo->panel_yres < mode->yres)
- rinfo->yres = mode->yres = rinfo->panel_yres;
-
- hTotal = mode->xres + rinfo->hblank;
- hSyncStart = mode->xres + rinfo->hOver_plus;
- hSyncEnd = hSyncStart + rinfo->hSync_width;
-
- vTotal = mode->yres + rinfo->vblank;
- vSyncStart = mode->yres + rinfo->vOver_plus;
- vSyncEnd = vSyncStart + rinfo->vSync_width;
- }
-
- sync = mode->sync;
- h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
- v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
-
- RTRACE("hStart = %d, hEnd = %d, hTotal = %d\n",
- hSyncStart, hSyncEnd, hTotal);
- RTRACE("vStart = %d, vEnd = %d, vTotal = %d\n",
- vSyncStart, vSyncEnd, vTotal);
-
- hsync_wid = (hSyncEnd - hSyncStart) / 8;
- vsync_wid = vSyncEnd - vSyncStart;
- if (hsync_wid == 0)
- hsync_wid = 1;
- else if (hsync_wid > 0x3f) /* max */
- hsync_wid = 0x3f;
-
- if (vsync_wid == 0)
- vsync_wid = 1;
- else if (vsync_wid > 0x1f) /* max */
- vsync_wid = 0x1f;
-
- hSyncPol = mode->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
- vSyncPol = mode->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
-
- cSync = mode->sync & FB_SYNC_COMP_HIGH_ACT ? (1 << 4) : 0;
-
- format = radeon_get_dstbpp(depth);
- bytpp = mode->bits_per_pixel >> 3;
-
- if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD))
- hsync_fudge = hsync_fudge_fp[format-1];
- else
- hsync_fudge = hsync_adj_tab[format-1];
-
- hsync_start = hSyncStart - 8 + hsync_fudge;
-
- newmode.crtc_gen_cntl = CRTC_EXT_DISP_EN | CRTC_EN |
- (format << 8);
-
- if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
- newmode.crtc_ext_cntl = VGA_ATI_LINEAR | XCRT_CNT_EN;
- if (mirror)
- newmode.crtc_ext_cntl |= CRTC_CRT_ON;
-
- newmode.crtc_gen_cntl &= ~(CRTC_DBL_SCAN_EN |
- CRTC_INTERLACE_EN);
- } else {
- newmode.crtc_ext_cntl = VGA_ATI_LINEAR | XCRT_CNT_EN |
- CRTC_CRT_ON;
- }
-
- newmode.dac_cntl = /* INREG(DAC_CNTL) | */ DAC_MASK_ALL | DAC_VGA_ADR_EN |
- DAC_8BIT_EN;
-
- newmode.crtc_h_total_disp = ((((hTotal / 8) - 1) & 0x3ff) |
- (((mode->xres / 8) - 1) << 16));
-
- newmode.crtc_h_sync_strt_wid = ((hsync_start & 0x1fff) |
- (hsync_wid << 16) | (h_sync_pol << 23));
-
- newmode.crtc_v_total_disp = ((vTotal - 1) & 0xffff) |
- ((mode->yres - 1) << 16);
-
- newmode.crtc_v_sync_strt_wid = (((vSyncStart - 1) & 0xfff) |
- (vsync_wid << 16) | (v_sync_pol << 23));
-
- if (accel) {
- /* We first calculate the engine pitch */
- rinfo->pitch = ((mode->xres_virtual * ((mode->bits_per_pixel + 1) / 8) + 0x3f)
- & ~(0x3f)) >> 6;
-
- /* Then, re-multiply it to get the CRTC pitch */
- newmode.crtc_pitch = (rinfo->pitch << 3) / ((mode->bits_per_pixel + 1) / 8);
- } else
- newmode.crtc_pitch = (mode->xres_virtual >> 3);
- newmode.crtc_pitch |= (newmode.crtc_pitch << 16);
-
-#if defined(__BIG_ENDIAN)
- /*
-	 * It looks like recent chips have a problem with SURFACE_CNTL:
-	 * setting SURF_TRANSLATION_DIS completely disables the
-	 * swapper as well, so we leave it unset for now.
- */
- newmode.surface_cntl = 0;
-
-	/* Set up swapping on both apertures. Though we currently
-	 * only use aperture 0, enabling the swapper on aperture 1
-	 * won't do any harm.
- */
- switch (mode->bits_per_pixel) {
- case 16:
- newmode.surface_cntl |= NONSURF_AP0_SWP_16BPP;
- newmode.surface_cntl |= NONSURF_AP1_SWP_16BPP;
- break;
- case 24:
- case 32:
- newmode.surface_cntl |= NONSURF_AP0_SWP_32BPP;
- newmode.surface_cntl |= NONSURF_AP1_SWP_32BPP;
- break;
- }
-#endif
-
- rinfo->pitch = ((mode->xres_virtual * ((mode->bits_per_pixel + 1) / 8) + 0x3f)
- & ~(0x3f)) / 64;
-
- RTRACE("h_total_disp = 0x%x\t hsync_strt_wid = 0x%x\n",
- newmode.crtc_h_total_disp, newmode.crtc_h_sync_strt_wid);
- RTRACE("v_total_disp = 0x%x\t vsync_strt_wid = 0x%x\n",
- newmode.crtc_v_total_disp, newmode.crtc_v_sync_strt_wid);
-
- newmode.xres = mode->xres;
- newmode.yres = mode->yres;
-
- rinfo->bpp = mode->bits_per_pixel;
- rinfo->depth = depth;
-
- if (freq > rinfo->pll.ppll_max)
- freq = rinfo->pll.ppll_max;
- if (freq*12 < rinfo->pll.ppll_min)
- freq = rinfo->pll.ppll_min / 12;
-
- {
- struct {
- int divider;
- int bitvalue;
- } *post_div,
- post_divs[] = {
- { 1, 0 },
- { 2, 1 },
- { 4, 2 },
- { 8, 3 },
- { 3, 4 },
- { 16, 5 },
- { 6, 6 },
- { 12, 7 },
- { 0, 0 },
- };
-
- for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
- rinfo->pll_output_freq = post_div->divider * freq;
- if (rinfo->pll_output_freq >= rinfo->pll.ppll_min &&
- rinfo->pll_output_freq <= rinfo->pll.ppll_max)
- break;
- }
-
- rinfo->post_div = post_div->divider;
- rinfo->fb_div = round_div(rinfo->pll.ref_div*rinfo->pll_output_freq,
- rinfo->pll.ref_clk);
- newmode.ppll_ref_div = rinfo->pll.ref_div;
- newmode.ppll_div_3 = rinfo->fb_div | (post_div->bitvalue << 16);
- }
- newmode.vclk_ecp_cntl = rinfo->init_state.vclk_ecp_cntl;
-
-#ifdef CONFIG_PPC_OF
-	/* Gross hack for the iBook with M7 until I find a proper fix */
- if (machine_is_compatible("PowerBook4,3") && rinfo->arch == RADEON_M7)
- newmode.ppll_div_3 = 0x000600ad;
-#endif /* CONFIG_PPC_OF */
-
- RTRACE("post div = 0x%x\n", rinfo->post_div);
- RTRACE("fb_div = 0x%x\n", rinfo->fb_div);
- RTRACE("ppll_div_3 = 0x%x\n", newmode.ppll_div_3);
-
- /* DDA */
- vclk_freq = round_div(rinfo->pll.ref_clk * rinfo->fb_div,
- rinfo->pll.ref_div * rinfo->post_div);
- xclk_freq = rinfo->pll.xclk;
-
- xclk_per_trans = round_div(xclk_freq * 128, vclk_freq * mode->bits_per_pixel);
-
- min_bits = min_bits_req(xclk_per_trans);
- useable_precision = min_bits + 1;
-
- xclk_per_trans_precise = round_div((xclk_freq * 128) << (11 - useable_precision),
- vclk_freq * mode->bits_per_pixel);
-
- ron = (4 * rinfo->ram.mb + 3 * _max(rinfo->ram.trcd - 2, 0) +
- 2 * rinfo->ram.trp + rinfo->ram.twr + rinfo->ram.cl + rinfo->ram.tr2w +
- xclk_per_trans) << (11 - useable_precision);
- roff = xclk_per_trans_precise * (32 - 4);
-
- RTRACE("ron = %d, roff = %d\n", ron, roff);
- RTRACE("vclk_freq = %d, per = %d\n", vclk_freq, xclk_per_trans_precise);
-
- if ((ron + rinfo->ram.rloop) >= roff) {
- printk("radeonfb: error ron out of range\n");
- return -EINVAL;
- }
-
- newmode.dda_config = (xclk_per_trans_precise |
- (useable_precision << 16) |
- (rinfo->ram.rloop << 20));
- newmode.dda_on_off = (ron << 16) | roff;
-
- if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
- unsigned int hRatio, vRatio;
-
- /* We force the pixel clock to be always enabled. Allowing it
-		 * to be power managed during blanking would save power, but it has
- * nasty interactions with the 2D engine & sleep code that haven't
- * been solved yet. --BenH
- */
- newmode.vclk_ecp_cntl &= ~PIXCLK_DAC_ALWAYS_ONb;
-
- if (mode->xres > rinfo->panel_xres)
- mode->xres = rinfo->panel_xres;
- if (mode->yres > rinfo->panel_yres)
- mode->yres = rinfo->panel_yres;
-
- newmode.fp_horz_stretch = (((rinfo->panel_xres / 8) - 1)
- << HORZ_PANEL_SHIFT);
- newmode.fp_vert_stretch = ((rinfo->panel_yres - 1)
- << VERT_PANEL_SHIFT);
-
- if (mode->xres != rinfo->panel_xres) {
- hRatio = round_div(mode->xres * HORZ_STRETCH_RATIO_MAX,
- rinfo->panel_xres);
- newmode.fp_horz_stretch = (((((unsigned long)hRatio) & HORZ_STRETCH_RATIO_MASK)) |
- (newmode.fp_horz_stretch &
- (HORZ_PANEL_SIZE | HORZ_FP_LOOP_STRETCH |
- HORZ_AUTO_RATIO_INC)));
- newmode.fp_horz_stretch |= (HORZ_STRETCH_BLEND |
- HORZ_STRETCH_ENABLE);
- }
- newmode.fp_horz_stretch &= ~HORZ_AUTO_RATIO;
-
- if (mode->yres != rinfo->panel_yres) {
- vRatio = round_div(mode->yres * VERT_STRETCH_RATIO_MAX,
- rinfo->panel_yres);
- newmode.fp_vert_stretch = (((((unsigned long)vRatio) & VERT_STRETCH_RATIO_MASK)) |
- (newmode.fp_vert_stretch &
- (VERT_PANEL_SIZE | VERT_STRETCH_RESERVED)));
- newmode.fp_vert_stretch |= (VERT_STRETCH_BLEND |
- VERT_STRETCH_ENABLE);
- }
- newmode.fp_vert_stretch &= ~VERT_AUTO_RATIO_EN;
-
- newmode.fp_gen_cntl = (rinfo->init_state.fp_gen_cntl & (u32)
- ~(FP_SEL_CRTC2 |
- FP_RMX_HVSYNC_CONTROL_EN |
- FP_DFP_SYNC_SEL |
- FP_CRT_SYNC_SEL |
- FP_CRTC_LOCK_8DOT |
- FP_USE_SHADOW_EN |
- FP_CRTC_USE_SHADOW_VEND |
- FP_CRT_SYNC_ALT));
-
- newmode.fp_gen_cntl |= (FP_CRTC_DONT_SHADOW_VPAR |
- FP_CRTC_DONT_SHADOW_HEND);
-
- newmode.lvds_gen_cntl = rinfo->init_state.lvds_gen_cntl;
- newmode.lvds_pll_cntl = rinfo->init_state.lvds_pll_cntl;
- newmode.tmds_crc = rinfo->init_state.tmds_crc;
- newmode.tmds_transmitter_cntl = rinfo->init_state.tmds_transmitter_cntl;
-
- if (primary_mon == MT_LCD) {
- newmode.lvds_gen_cntl |= (LVDS_ON | LVDS_BLON);
- newmode.fp_gen_cntl &= ~(FP_FPON | FP_TMDS_EN);
- } else {
- /* DFP */
- newmode.fp_gen_cntl |= (FP_FPON | FP_TMDS_EN);
- newmode.tmds_transmitter_cntl = (TMDS_RAN_PAT_RST |
- TMDS_ICHCSEL | TMDS_PLL_EN) &
- ~(TMDS_PLLRST);
- newmode.crtc_ext_cntl &= ~CRTC_CRT_ON;
- }
-
- newmode.fp_crtc_h_total_disp = (((rinfo->hblank / 8) & 0x3ff) |
- (((mode->xres / 8) - 1) << 16));
- newmode.fp_crtc_v_total_disp = (rinfo->vblank & 0xffff) |
- ((mode->yres - 1) << 16);
- newmode.fp_h_sync_strt_wid = ((rinfo->hOver_plus & 0x1fff) |
- (hsync_wid << 16) | (h_sync_pol << 23));
- newmode.fp_v_sync_strt_wid = ((rinfo->vOver_plus & 0xfff) |
- (vsync_wid << 16) | (v_sync_pol << 23));
- }
-
- /* do it! */
- if (!rinfo->asleep) {
- radeon_write_mode (rinfo, &newmode);
- /* (re)initialize the engine */
- if (noaccel)
- radeon_engine_init (rinfo);
-
- }
- /* Update fix */
- if (accel)
- info->fix.line_length = rinfo->pitch*64;
- else
- info->fix.line_length = mode->xres_virtual * ((mode->bits_per_pixel + 1) / 8);
- info->fix.visual = rinfo->depth == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
-
-#ifdef CONFIG_BOOTX_TEXT
- /* Update debug text engine */
- btext_update_display(rinfo->fb_base_phys, mode->xres, mode->yres,
- rinfo->depth, info->fix.line_length);
-#endif
-
- return 0;
-}
-
-
-static void radeon_write_mode (struct radeonfb_info *rinfo,
- struct radeon_regs *mode)
-{
- int i;
- int primary_mon = PRIMARY_MONITOR(rinfo);
-
- radeonfb_blank(VESA_POWERDOWN, (struct fb_info *)rinfo);
-
-
- if (rinfo->arch == RADEON_M6) {
- for (i=0; i<7; i++)
- OUTREG(common_regs_m6[i].reg, common_regs_m6[i].val);
- } else {
- for (i=0; i<9; i++)
- OUTREG(common_regs[i].reg, common_regs[i].val);
- }
-
- OUTREG(CRTC_GEN_CNTL, mode->crtc_gen_cntl);
- OUTREGP(CRTC_EXT_CNTL, mode->crtc_ext_cntl,
- CRTC_HSYNC_DIS | CRTC_VSYNC_DIS | CRTC_DISPLAY_DIS);
- OUTREGP(DAC_CNTL, mode->dac_cntl, DAC_RANGE_CNTL | DAC_BLANKING);
- OUTREG(CRTC_H_TOTAL_DISP, mode->crtc_h_total_disp);
- OUTREG(CRTC_H_SYNC_STRT_WID, mode->crtc_h_sync_strt_wid);
- OUTREG(CRTC_V_TOTAL_DISP, mode->crtc_v_total_disp);
- OUTREG(CRTC_V_SYNC_STRT_WID, mode->crtc_v_sync_strt_wid);
- OUTREG(CRTC_OFFSET, 0);
- OUTREG(CRTC_OFFSET_CNTL, 0);
- OUTREG(CRTC_PITCH, mode->crtc_pitch);
-
-#if defined(__BIG_ENDIAN)
- OUTREG(SURFACE_CNTL, mode->surface_cntl);
-#endif
-
- while ((INREG(CLOCK_CNTL_INDEX) & PPLL_DIV_SEL_MASK) !=
- PPLL_DIV_SEL_MASK) {
- OUTREGP(CLOCK_CNTL_INDEX, PPLL_DIV_SEL_MASK, 0xffff);
- }
-
- OUTPLLP(PPLL_CNTL, PPLL_RESET, 0xffff);
-
- while ((INPLL(PPLL_REF_DIV) & PPLL_REF_DIV_MASK) !=
- (mode->ppll_ref_div & PPLL_REF_DIV_MASK)) {
- OUTPLLP(PPLL_REF_DIV, mode->ppll_ref_div, ~PPLL_REF_DIV_MASK);
- }
-
- while ((INPLL(PPLL_DIV_3) & PPLL_FB3_DIV_MASK) !=
- (mode->ppll_div_3 & PPLL_FB3_DIV_MASK)) {
- OUTPLLP(PPLL_DIV_3, mode->ppll_div_3, ~PPLL_FB3_DIV_MASK);
- }
-
- while ((INPLL(PPLL_DIV_3) & PPLL_POST3_DIV_MASK) !=
- (mode->ppll_div_3 & PPLL_POST3_DIV_MASK)) {
- OUTPLLP(PPLL_DIV_3, mode->ppll_div_3, ~PPLL_POST3_DIV_MASK);
- }
-
- OUTPLL(HTOTAL_CNTL, 0);
-
- OUTPLLP(PPLL_CNTL, 0, ~PPLL_RESET);
-
-// OUTREG(DDA_CONFIG, mode->dda_config);
-// OUTREG(DDA_ON_OFF, mode->dda_on_off);
-
- if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
- OUTREG(FP_CRTC_H_TOTAL_DISP, mode->fp_crtc_h_total_disp);
- OUTREG(FP_CRTC_V_TOTAL_DISP, mode->fp_crtc_v_total_disp);
- OUTREG(FP_H_SYNC_STRT_WID, mode->fp_h_sync_strt_wid);
- OUTREG(FP_V_SYNC_STRT_WID, mode->fp_v_sync_strt_wid);
- OUTREG(FP_HORZ_STRETCH, mode->fp_horz_stretch);
- OUTREG(FP_VERT_STRETCH, mode->fp_vert_stretch);
- OUTREG(FP_GEN_CNTL, mode->fp_gen_cntl);
- OUTREG(TMDS_CRC, mode->tmds_crc);
- OUTREG(TMDS_TRANSMITTER_CNTL, mode->tmds_transmitter_cntl);
-
- if (primary_mon == MT_LCD) {
- unsigned int tmp = INREG(LVDS_GEN_CNTL);
-
- mode->lvds_gen_cntl &= ~LVDS_STATE_MASK;
- mode->lvds_gen_cntl |= (rinfo->init_state.lvds_gen_cntl & LVDS_STATE_MASK);
-
- if ((tmp & (LVDS_ON | LVDS_BLON)) ==
- (mode->lvds_gen_cntl & (LVDS_ON | LVDS_BLON))) {
- OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl);
- } else {
- if (mode->lvds_gen_cntl & (LVDS_ON | LVDS_BLON)) {
- udelay(1000);
- OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl);
- } else {
- OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl |
- LVDS_BLON);
- udelay(1000);
- OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl);
- }
- }
- }
- }
-
- radeonfb_blank(VESA_NO_BLANKING, (struct fb_info *)rinfo);
-
- OUTPLL(VCLK_ECP_CNTL, mode->vclk_ecp_cntl);
-
- return;
-}
-
-static struct fb_ops radeonfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = radeonfb_check_var,
- .fb_set_par = radeonfb_set_par,
- .fb_setcolreg = radeonfb_setcolreg,
- .fb_pan_display = radeonfb_pan_display,
- .fb_blank = radeonfb_blank,
- .fb_ioctl = radeonfb_ioctl,
-#if 0
- .fb_fillrect = radeonfb_fillrect,
- .fb_copyarea = radeonfb_copyarea,
- .fb_imageblit = radeonfb_imageblit,
- .fb_rasterimg = radeonfb_rasterimg,
-#else
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-#endif
-};
-
-
-static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
-{
- struct fb_info *info;
-
- info = &rinfo->info;
-
- info->par = rinfo;
- info->pseudo_palette = rinfo->pseudo_palette;
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
- info->fbops = &radeonfb_ops;
- info->screen_base = rinfo->fb_base;
-
- /* Fill fix common fields */
- strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
- info->fix.smem_start = rinfo->fb_base_phys;
- info->fix.smem_len = rinfo->video_ram;
- info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
- info->fix.xpanstep = 8;
- info->fix.ypanstep = 1;
- info->fix.ywrapstep = 0;
- info->fix.type_aux = 0;
- info->fix.mmio_start = rinfo->mmio_base_phys;
- info->fix.mmio_len = RADEON_REGSIZE;
- if (noaccel)
- info->fix.accel = FB_ACCEL_NONE;
- else
- info->fix.accel = FB_ACCEL_ATI_RADEON;
-
- if (radeon_init_disp (rinfo) < 0)
- return -1;
-
- return 0;
-}
-
-
-#ifdef CONFIG_PMAC_BACKLIGHT
-
-/* TODO: Double-check these tables; we don't go up to full-ON backlight
- * in these, possibly because we noticed MacOS doesn't, but I'd prefer
- * to have some more official numbers from ATI.
- */
-static int backlight_conv_m6[] = {
- 0xff, 0xc0, 0xb5, 0xaa, 0x9f, 0x94, 0x89, 0x7e,
- 0x73, 0x68, 0x5d, 0x52, 0x47, 0x3c, 0x31, 0x24
-};
-static int backlight_conv_m7[] = {
- 0x00, 0x3f, 0x4a, 0x55, 0x60, 0x6b, 0x76, 0x81,
- 0x8c, 0x97, 0xa2, 0xad, 0xb8, 0xc3, 0xce, 0xd9
-};
-
-#define BACKLIGHT_LVDS_OFF
-#undef BACKLIGHT_DAC_OFF
-
-/* We turn off the LCD completely instead of just dimming the backlight.
- * This provides somewhat greater power savings, and the display is useless
- * without the backlight anyway.
- */
-
-static int radeon_set_backlight_enable(int on, int level, void *data)
-{
- struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
- unsigned int lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
- int* conv_table;
-
-	/* Pardon me for this hack... maybe some day we can figure
-	 * out in which direction the backlight should work on a given
-	 * panel?
- */
- if ((rinfo->arch == RADEON_M7 || rinfo->arch == RADEON_M9)
- && !machine_is_compatible("PowerBook4,3"))
- conv_table = backlight_conv_m7;
- else
- conv_table = backlight_conv_m6;
-
- lvds_gen_cntl |= (LVDS_BL_MOD_EN | LVDS_BLON);
- if (on && (level > BACKLIGHT_OFF)) {
- lvds_gen_cntl |= LVDS_DIGON;
- if (!(lvds_gen_cntl & LVDS_ON)) {
- lvds_gen_cntl &= ~LVDS_BLON;
- OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
- (void)INREG(LVDS_GEN_CNTL);
- mdelay(10);
- lvds_gen_cntl |= LVDS_BLON;
- OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
- }
- lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
- lvds_gen_cntl |= (conv_table[level] <<
- LVDS_BL_MOD_LEVEL_SHIFT);
- lvds_gen_cntl |= (LVDS_ON | LVDS_EN);
- lvds_gen_cntl &= ~LVDS_DISPLAY_DIS;
- } else {
- lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
- lvds_gen_cntl |= (conv_table[0] <<
- LVDS_BL_MOD_LEVEL_SHIFT);
- lvds_gen_cntl |= LVDS_DISPLAY_DIS;
- OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
- udelay(10);
- lvds_gen_cntl &= ~(LVDS_ON | LVDS_EN | LVDS_BLON | LVDS_DIGON);
- }
-
- OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
- rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
- rinfo->init_state.lvds_gen_cntl |= (lvds_gen_cntl & LVDS_STATE_MASK);
-
- return 0;
-}
-
-static int radeon_set_backlight_level(int level, void *data)
-{
- return radeon_set_backlight_enable(1, level, data);
-}
-#endif /* CONFIG_PMAC_BACKLIGHT */
-
-
-#ifdef CONFIG_PMAC_PBOOK
-
-static u32 dbg_clk;
-
-/*
- * Radeon M6 Power Management code. This code currently only supports
- * the mobile chips; it is based on information provided by ATI
- * along with hours of tracing of MacOS drivers.
- */
-
-static void radeon_pm_save_regs(struct radeonfb_info *rinfo)
-{
- rinfo->save_regs[0] = INPLL(PLL_PWRMGT_CNTL);
- rinfo->save_regs[1] = INPLL(CLK_PWRMGT_CNTL);
- rinfo->save_regs[2] = INPLL(MCLK_CNTL);
- rinfo->save_regs[3] = INPLL(SCLK_CNTL);
- rinfo->save_regs[4] = INPLL(CLK_PIN_CNTL);
- rinfo->save_regs[5] = INPLL(VCLK_ECP_CNTL);
- rinfo->save_regs[6] = INPLL(PIXCLKS_CNTL);
- rinfo->save_regs[7] = INPLL(MCLK_MISC);
- rinfo->save_regs[8] = INPLL(P2PLL_CNTL);
-
- rinfo->save_regs[9] = INREG(DISP_MISC_CNTL);
- rinfo->save_regs[10] = INREG(DISP_PWR_MAN);
- rinfo->save_regs[11] = INREG(LVDS_GEN_CNTL);
- rinfo->save_regs[12] = INREG(LVDS_PLL_CNTL);
- rinfo->save_regs[13] = INREG(TV_DAC_CNTL);
- rinfo->save_regs[14] = INREG(BUS_CNTL1);
- rinfo->save_regs[15] = INREG(CRTC_OFFSET_CNTL);
- rinfo->save_regs[16] = INREG(AGP_CNTL);
- rinfo->save_regs[17] = (INREG(CRTC_GEN_CNTL) & 0xfdffffff) | 0x04000000;
- rinfo->save_regs[18] = (INREG(CRTC2_GEN_CNTL) & 0xfdffffff) | 0x04000000;
- rinfo->save_regs[19] = INREG(GPIOPAD_A);
- rinfo->save_regs[20] = INREG(GPIOPAD_EN);
- rinfo->save_regs[21] = INREG(GPIOPAD_MASK);
- rinfo->save_regs[22] = INREG(ZV_LCDPAD_A);
- rinfo->save_regs[23] = INREG(ZV_LCDPAD_EN);
- rinfo->save_regs[24] = INREG(ZV_LCDPAD_MASK);
- rinfo->save_regs[25] = INREG(GPIO_VGA_DDC);
- rinfo->save_regs[26] = INREG(GPIO_DVI_DDC);
- rinfo->save_regs[27] = INREG(GPIO_MONID);
- rinfo->save_regs[28] = INREG(GPIO_CRT2_DDC);
-
- rinfo->save_regs[29] = INREG(SURFACE_CNTL);
- rinfo->save_regs[30] = INREG(MC_FB_LOCATION);
- rinfo->save_regs[31] = INREG(DISPLAY_BASE_ADDR);
- rinfo->save_regs[32] = INREG(MC_AGP_LOCATION);
- rinfo->save_regs[33] = INREG(CRTC2_DISPLAY_BASE_ADDR);
-}
-
-static void radeon_pm_restore_regs(struct radeonfb_info *rinfo)
-{
- OUTPLL(P2PLL_CNTL, rinfo->save_regs[8] & 0xFFFFFFFE); /* First */
-
- OUTPLL(PLL_PWRMGT_CNTL, rinfo->save_regs[0]);
- OUTPLL(CLK_PWRMGT_CNTL, rinfo->save_regs[1]);
- OUTPLL(MCLK_CNTL, rinfo->save_regs[2]);
- OUTPLL(SCLK_CNTL, rinfo->save_regs[3]);
- OUTPLL(CLK_PIN_CNTL, rinfo->save_regs[4]);
- OUTPLL(VCLK_ECP_CNTL, rinfo->save_regs[5]);
- OUTPLL(PIXCLKS_CNTL, rinfo->save_regs[6]);
- OUTPLL(MCLK_MISC, rinfo->save_regs[7]);
-
- OUTREG(DISP_MISC_CNTL, rinfo->save_regs[9]);
- OUTREG(DISP_PWR_MAN, rinfo->save_regs[10]);
- OUTREG(LVDS_GEN_CNTL, rinfo->save_regs[11]);
- OUTREG(LVDS_PLL_CNTL,rinfo->save_regs[12]);
- OUTREG(TV_DAC_CNTL, rinfo->save_regs[13]);
- OUTREG(BUS_CNTL1, rinfo->save_regs[14]);
- OUTREG(CRTC_OFFSET_CNTL, rinfo->save_regs[15]);
- OUTREG(AGP_CNTL, rinfo->save_regs[16]);
- OUTREG(CRTC_GEN_CNTL, rinfo->save_regs[17]);
- OUTREG(CRTC2_GEN_CNTL, rinfo->save_regs[18]);
-
-	// wait for VBL before this one?
- OUTPLL(P2PLL_CNTL, rinfo->save_regs[8]);
-
- OUTREG(GPIOPAD_A, rinfo->save_regs[19]);
- OUTREG(GPIOPAD_EN, rinfo->save_regs[20]);
- OUTREG(GPIOPAD_MASK, rinfo->save_regs[21]);
- OUTREG(ZV_LCDPAD_A, rinfo->save_regs[22]);
- OUTREG(ZV_LCDPAD_EN, rinfo->save_regs[23]);
- OUTREG(ZV_LCDPAD_MASK, rinfo->save_regs[24]);
- OUTREG(GPIO_VGA_DDC, rinfo->save_regs[25]);
- OUTREG(GPIO_DVI_DDC, rinfo->save_regs[26]);
- OUTREG(GPIO_MONID, rinfo->save_regs[27]);
- OUTREG(GPIO_CRT2_DDC, rinfo->save_regs[28]);
-}
-
-static void radeon_pm_disable_iopad(struct radeonfb_info *rinfo)
-{
- OUTREG(GPIOPAD_MASK, 0x0001ffff);
- OUTREG(GPIOPAD_EN, 0x00000400);
- OUTREG(GPIOPAD_A, 0x00000000);
- OUTREG(ZV_LCDPAD_MASK, 0x00000000);
- OUTREG(ZV_LCDPAD_EN, 0x00000000);
- OUTREG(ZV_LCDPAD_A, 0x00000000);
- OUTREG(GPIO_VGA_DDC, 0x00030000);
- OUTREG(GPIO_DVI_DDC, 0x00000000);
- OUTREG(GPIO_MONID, 0x00030000);
- OUTREG(GPIO_CRT2_DDC, 0x00000000);
-}
-
-static void radeon_pm_program_v2clk(struct radeonfb_info *rinfo)
-{
-//
-// u32 reg;
-//
-// OUTPLL(P2PLL_REF_DIV, 0x0c);
-//
-// .../... figure out what macos does here
-}
-
-static void radeon_pm_low_current(struct radeonfb_info *rinfo)
-{
- u32 reg;
-
- reg = INREG(BUS_CNTL1);
- reg &= ~BUS_CNTL1_MOBILE_PLATFORM_SEL_MASK;
- reg |= BUS_CNTL1_AGPCLK_VALID | (1<<BUS_CNTL1_MOBILE_PLATFORM_SEL_SHIFT);
- OUTREG(BUS_CNTL1, reg);
-
- reg = INPLL(PLL_PWRMGT_CNTL);
- reg |= PLL_PWRMGT_CNTL_SPLL_TURNOFF | PLL_PWRMGT_CNTL_PPLL_TURNOFF |
- PLL_PWRMGT_CNTL_P2PLL_TURNOFF | PLL_PWRMGT_CNTL_TVPLL_TURNOFF;
- reg &= ~PLL_PWRMGT_CNTL_SU_MCLK_USE_BCLK;
- reg &= ~PLL_PWRMGT_CNTL_MOBILE_SU;
- OUTPLL(PLL_PWRMGT_CNTL, reg);
-
-// reg = INPLL(TV_PLL_CNTL1);
-// reg |= TV_PLL_CNTL1__TVPLL_RESET | TV_PLL_CNTL1__TVPLL_SLEEP;
-// OUTPLL(TV_PLL_CNTL1, reg);
-
- reg = INREG(TV_DAC_CNTL);
- reg &= ~(TV_DAC_CNTL_BGADJ_MASK |TV_DAC_CNTL_DACADJ_MASK);
- reg |=TV_DAC_CNTL_BGSLEEP | TV_DAC_CNTL_RDACPD | TV_DAC_CNTL_GDACPD |
- TV_DAC_CNTL_BDACPD |
- (8<<TV_DAC_CNTL_BGADJ__SHIFT) | (8<<TV_DAC_CNTL_DACADJ__SHIFT);
- OUTREG(TV_DAC_CNTL, reg);
-
- reg = INREG(TMDS_TRANSMITTER_CNTL);
- reg &= ~(TMDS_PLL_EN |TMDS_PLLRST);
- OUTREG(TMDS_TRANSMITTER_CNTL, reg);
-
-// lvds_pll_cntl = regr32(g, LVDS_PLL_CNTL);
-// lvds_pll_cntl &= ~LVDS_PLL_CNTL__LVDS_PLL_EN;
-// lvds_pll_cntl |= LVDS_PLL_CNTL__LVDS_PLL_RESET;
-// regw32(g, LVDS_PLL_CNTL, lvds_pll_cntl);
-
- reg = INREG(DAC_CNTL);
- reg &= ~DAC_CMP_EN;
- OUTREG(DAC_CNTL, reg);
-
- reg = INREG(DAC_CNTL2);
- reg &= ~DAC2_CMP_EN;
- OUTREG(DAC_CNTL2, reg);
-
- reg = INREG(TV_DAC_CNTL);
- reg &= ~TV_DAC_CNTL_DETECT;
- OUTREG(TV_DAC_CNTL, reg);
-}
-
-static void radeon_pm_setup_for_suspend(struct radeonfb_info *rinfo)
-{
- /* This code is disabled. It does what is in the pm_init
- * function of the MacOS driver code ATI sent me. However,
- * it doesn't fix my sleep problem, and is causing other issues
- * on wakeup (basically the machine dying when switching consoles).
- * I haven't had time to investigate this yet.
- */
-#if 0
- u32 disp_misc_cntl;
- u32 disp_pwr_man;
- u32 temp;
-
- // set SPLL, MPLL, PPLL, P2PLL, TVPLL, SCLK, MCLK, PCLK, P2CLK,
- // TCLK and TEST_MODE to 0
- temp = INPLL(CLK_PWRMGT_CNTL);
- OUTPLL(CLK_PWRMGT_CNTL , temp & ~0xc00002ff);
-
- // Turn on Power Management
- temp = INPLL(CLK_PWRMGT_CNTL);
- OUTPLL(CLK_PWRMGT_CNTL , temp | 0x00000400);
-
- // Turn off display clock if using mobile chips
- temp = INPLL(CLK_PWRMGT_CNTL);
- OUTREG(CLK_PWRMGT_CNTL , temp | 0x00100000);
-
- // Force PIXCLK_ALWAYS_ON and PIXCLK_DAC_ALWAYS_ON
- temp = INPLL(VCLK_ECP_CNTL);
- OUTPLL(VCLK_ECP_CNTL, temp & ~0x000000c0);
-
- // Force ECP_FORCE_ON to 1
- temp = INPLL(VCLK_ECP_CNTL);
- OUTPLL(VCLK_ECP_CNTL, temp | 0x00040000);
-
- // Force PIXCLK_BLEND_ALWAYS_ON and PIXCLK_GV_ALWAYS_ON
- temp = INPLL(PIXCLKS_CNTL);
- OUTPLL(PIXCLKS_CNTL, temp & ~0x00001800);
-
- // Forcing SCLK_CNTL to ON
- OUTPLL(SCLK_CNTL, (INPLL(SCLK_CNTL)& 0x00000007) | 0xffff8000 );
-
- // Set PM control over XTALIN pad
- temp = INPLL(CLK_PIN_CNTL);
- OUTPLL(CLK_PIN_CNTL, temp | 0x00080000);
-
- // Force MCLK and YCLK and MC as dynamic
- temp = INPLL(MCLK_CNTL);
- OUTPLL(MCLK_CNTL, temp & 0xffeaffff);
-
- // PLL_TURNOFF
- temp = INPLL(PLL_PWRMGT_CNTL);
- OUTPLL(PLL_PWRMGT_CNTL, temp | 0x0000001f);
-
- // set MOBILE_SU to 1 if M6 or DDR64 is detected
- temp = INPLL(PLL_PWRMGT_CNTL);
- OUTPLL(PLL_PWRMGT_CNTL, temp | 0x00010000);
-
- // select PM access mode (PM_MODE_SEL) (use ACPI mode)
-// temp = INPLL(PLL_PWRMGT_CNTL);
-// OUTPLL(PLL_PWRMGT_CNTL, temp | 0x00002000);
- temp = INPLL(PLL_PWRMGT_CNTL);
- OUTPLL(PLL_PWRMGT_CNTL, temp & ~0x00002000);
-
- // set DISP_MISC_CNTL register
- disp_misc_cntl = INREG(DISP_MISC_CNTL);
- disp_misc_cntl &= ~( DISP_MISC_CNTL_SOFT_RESET_GRPH_PP |
- DISP_MISC_CNTL_SOFT_RESET_SUBPIC_PP |
- DISP_MISC_CNTL_SOFT_RESET_OV0_PP |
- DISP_MISC_CNTL_SOFT_RESET_GRPH_SCLK |
- DISP_MISC_CNTL_SOFT_RESET_SUBPIC_SCLK |
- DISP_MISC_CNTL_SOFT_RESET_OV0_SCLK |
- DISP_MISC_CNTL_SOFT_RESET_GRPH2_PP |
- DISP_MISC_CNTL_SOFT_RESET_GRPH2_SCLK |
- DISP_MISC_CNTL_SOFT_RESET_LVDS |
- DISP_MISC_CNTL_SOFT_RESET_TMDS |
- DISP_MISC_CNTL_SOFT_RESET_DIG_TMDS |
- DISP_MISC_CNTL_SOFT_RESET_TV);
- OUTREG(DISP_MISC_CNTL, disp_misc_cntl);
-
- // set DISP_PWR_MAN register
- disp_pwr_man = INREG(DISP_PWR_MAN);
- // clau - 9.29.2000 - changes made to bit23:18 to set to 1 as requested by George
- disp_pwr_man |= (DISP_PWR_MAN_DIG_TMDS_ENABLE_RST |
- DISP_PWR_MAN_TV_ENABLE_RST |
- // DISP_PWR_MAN_AUTO_PWRUP_EN |
- DISP_PWR_MAN_DISP_D3_GRPH_RST |
- DISP_PWR_MAN_DISP_D3_SUBPIC_RST |
- DISP_PWR_MAN_DISP_D3_OV0_RST |
- DISP_PWR_MAN_DISP_D1D2_GRPH_RST |
- DISP_PWR_MAN_DISP_D1D2_SUBPIC_RST |
- DISP_PWR_MAN_DISP_D1D2_OV0_RST);
- disp_pwr_man &= ~(DISP_PWR_MAN_DISP_PWR_MAN_D3_CRTC_EN |
- DISP_PWR_MAN_DISP2_PWR_MAN_D3_CRTC2_EN|
- DISP_PWR_MAN_DISP_D3_RST |
- DISP_PWR_MAN_DISP_D3_REG_RST);
- OUTREG(DISP_PWR_MAN, disp_pwr_man);
-
- // clau - 10.24.2000
- // - add in setting for BUS_CNTL1 b27:26 = 0x01 and b31 = 0x1
- // - add in setting for AGP_CNTL b7:0 = 0x20
- // - add in setting for DVI_DDC_DATA_OUT_EN b17:16 = 0x0
-
- // the following settings (two lines) are applied at a later part of this function, only on mobile platform
- // requires the -mobile flag
- OUTREG(BUS_CNTL1, (INREG(BUS_CNTL1) & 0xf3ffffff) | 0x04000000);
- OUTREG(BUS_CNTL1, INREG(BUS_CNTL1) | 0x80000000);
- OUTREG(AGP_CNTL, (INREG(AGP_CNTL) & 0xffffff00) | 0x20);
- OUTREG(GPIO_DVI_DDC, INREG(GPIO_DVI_DDC) & 0xfffcffff);
-
- // yulee - 12.12.2000
- // A12 only
- // EN_MCLK_TRISTATE_IN_SUSPEND@MCLK_MISC = 1
- // ACCESS_REGS_IN_SUSPEND@CLK_PIN_CNTL = 0
- // only on mobile platform
- OUTPLL(MCLK_MISC, INPLL(MCLK_MISC) | 0x00040000 );
-
- // yulee -12.12.2000
- // AGPCLK_VALID@BUS_CNTL1 = 1
- // MOBILE_PLATFORM_SEL@BUS_CNTL1 = 01
- // CRTC_STEREO_SYNC_OUT_EN@CRTC_OFFSET_CNTL = 0
- // CG_CLK_TO_OUTPIN@CLK_PIN_CNTL = 0
- // only on mobile platform
- OUTPLL(CLK_PIN_CNTL, INPLL(CLK_PIN_CNTL ) & 0xFFFFF7FF );
- OUTREG(BUS_CNTL1, (INREG(BUS_CNTL1 ) & 0xF3FFFFFF) | 0x84000000 );
- OUTREG(CRTC_OFFSET_CNTL, INREG(CRTC_OFFSET_CNTL ) & 0xFFEFFFFF );
-
- mdelay(100);
-#endif
-
- /* Disable CRTCs */
- OUTREG(CRTC_GEN_CNTL, (INREG(CRTC_GEN_CNTL) & ~CRTC_EN) | CRTC_DISP_REQ_EN_B);
- OUTREG(CRTC2_GEN_CNTL, (INREG(CRTC2_GEN_CNTL) & ~CRTC2_EN) | CRTC2_DISP_REQ_EN_B);
- (void)INREG(CRTC2_GEN_CNTL);
- mdelay(17);
-}
-
-static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
-{
- u16 pwr_cmd;
-
- if (!rinfo->pm_reg)
- return;
-
- /* Set the chip into the appropriate suspend mode (we use D2;
- * D3 would require a complete re-initialization of the chip,
- * including PCI config registers, clocks, AGP conf, ...)
- */
- if (suspend) {
- /* According to ATI, we should program V2CLK here; I have
- * to verify exactly what's going on.
- */
- /* Save some registers */
- radeon_pm_save_regs(rinfo);
-
- /* Check whether this applies to M7 too; it might or might not work.
- * M7 may also need explicit enabling of PM.
- */
- if (rinfo->arch == RADEON_M6) {
- /* Program V2CLK */
- radeon_pm_program_v2clk(rinfo);
-
- /* Disable IO PADs */
- radeon_pm_disable_iopad(rinfo);
-
- /* Set low current */
- radeon_pm_low_current(rinfo);
-
- /* Prepare chip for power management */
- radeon_pm_setup_for_suspend(rinfo);
-
- /* Reset the MDLL */
- OUTPLL(MDLL_CKO, INPLL(MDLL_CKO) | MCKOA_RESET);
- (void)INPLL(MDLL_RDCKA);
- OUTPLL(MDLL_CKO, INPLL(MDLL_CKO) & ~MCKOA_RESET);
- (void)INPLL(MDLL_RDCKA);
- }
-
- /* Switch PCI power management to D2. */
- for (;;) {
- pci_read_config_word(
- rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL,
- &pwr_cmd);
- if (pwr_cmd & 2)
- break;
- pci_write_config_word(
- rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL,
- (pwr_cmd & ~PCI_PM_CTRL_STATE_MASK) | 2);
- mdelay(500);
- }
- } else {
- /* Switch PCI power management back to D0 */
- mdelay(200);
- pci_write_config_word(rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL, 0);
- mdelay(500);
-
- dbg_clk = INPLL(1);
-
- /* Do we need that on M7 ? */
- if (rinfo->arch == RADEON_M6) {
- /* Restore the MDLL */
- OUTPLL(MDLL_CKO, INPLL(MDLL_CKO) & ~MCKOA_RESET);
- (void)INPLL(MDLL_CKO);
- }
-
- /* Restore some registers */
- radeon_pm_restore_regs(rinfo);
- }
-}
-
-/*
- * Save the contents of the framebuffer when we go to sleep,
- * and restore it when we wake up again.
- */
-
-int radeon_sleep_notify(struct pmu_sleep_notifier *self, int when)
-{
- struct radeonfb_info *rinfo;
-
- for (rinfo = board_list; rinfo != NULL; rinfo = rinfo->next) {
- struct fb_fix_screeninfo fix;
- int nb;
- struct display *disp;
-
- disp = (rinfo->currcon < 0) ? rinfo->info.disp : &fb_display[rinfo->currcon];
-
- switch (rinfo->arch) {
- case RADEON_M6:
- case RADEON_M7:
- case RADEON_M9:
- break;
- default:
- return PBOOK_SLEEP_REFUSE;
- }
-
- radeonfb_get_fix(&fix, fg_console, (struct fb_info *)rinfo);
- nb = fb_display[fg_console].var.yres * fix.line_length;
-
- switch (when) {
- case PBOOK_SLEEP_NOW:
- acquire_console_sem();
- disp->dispsw = &fbcon_dummy;
-
- if (!noaccel) {
- /* Make sure engine is reset */
- radeon_engine_reset();
- radeon_engine_idle();
- }
-
- /* Blank display and LCD */
- radeonfb_blank(VESA_POWERDOWN+1,
- (struct fb_info *)rinfo);
-
- /* Sleep */
- rinfo->asleep = 1;
- radeon_set_suspend(rinfo, 1);
- release_console_sem();
-
- break;
- case PBOOK_WAKE:
- acquire_console_sem();
- /* Wakeup */
- radeon_set_suspend(rinfo, 0);
-
- if (!noaccel)
- radeon_engine_init(rinfo);
- rinfo->asleep = 0;
- radeon_set_dispsw(rinfo, disp);
- radeon_load_video_mode(rinfo, &disp->var);
- do_install_cmap(rinfo->currcon < 0 ? 0 : rinfo->currcon,
- (struct fb_info *)rinfo);
-
- radeonfb_blank(0, (struct fb_info *)rinfo);
- release_console_sem();
- printk("CLK_PIN_CNTL on wakeup was: %08x\n", dbg_clk);
- break;
- }
- }
-
- return PBOOK_SLEEP_OK;
-}
-
-#endif /* CONFIG_PMAC_PBOOK */
-
-static int radeonfb_pci_register (struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct radeonfb_info *rinfo;
- struct radeon_chip_info *rci = &radeon_chip_info[ent->driver_data];
- u32 tmp;
-
- RTRACE("radeonfb_pci_register BEGIN\n");
-
- /* Enable device in PCI config */
- if (pci_enable_device(pdev) != 0) {
- printk(KERN_ERR "radeonfb: Cannot enable PCI device\n");
- return -ENODEV;
- }
-
- rinfo = kmalloc (sizeof (struct radeonfb_info), GFP_KERNEL);
- if (!rinfo) {
- printk ("radeonfb: could not allocate memory\n");
- return -ENODEV;
- }
-
- memset (rinfo, 0, sizeof (struct radeonfb_info));
- //info = &rinfo->info;
- rinfo->pdev = pdev;
- strcpy(rinfo->name, rci->name);
- rinfo->arch = rci->arch;
-
- /* Set base addrs */
- rinfo->fb_base_phys = pci_resource_start (pdev, 0);
- rinfo->mmio_base_phys = pci_resource_start (pdev, 2);
-
- /* request the mem regions */
- if (!request_mem_region (rinfo->fb_base_phys,
- pci_resource_len(pdev, 0), "radeonfb")) {
- printk ("radeonfb: cannot reserve FB region\n");
- kfree (rinfo);
- return -ENODEV;
- }
-
- if (!request_mem_region (rinfo->mmio_base_phys,
- pci_resource_len(pdev, 2), "radeonfb")) {
- printk ("radeonfb: cannot reserve MMIO region\n");
- release_mem_region (rinfo->fb_base_phys,
- pci_resource_len(pdev, 0));
- kfree (rinfo);
- return -ENODEV;
- }
-
- /* map the regions */
- rinfo->mmio_base = ioremap (rinfo->mmio_base_phys, RADEON_REGSIZE);
- if (!rinfo->mmio_base) {
- printk ("radeonfb: cannot map MMIO\n");
- release_mem_region (rinfo->mmio_base_phys,
- pci_resource_len(pdev, 2));
- release_mem_region (rinfo->fb_base_phys,
- pci_resource_len(pdev, 0));
- kfree (rinfo);
- return -ENODEV;
- }
-
- rinfo->chipset = pdev->device;
-
- switch (rinfo->arch) {
- case RADEON_R100:
- rinfo->hasCRTC2 = 0;
- break;
- default:
- /* all the rest have it */
- rinfo->hasCRTC2 = 1;
- break;
- }
-#if 0
- if (rinfo->arch == RADEON_M7) {
- /*
- * Noticed some errors in accel with M7; will have to work these out...
- */
- noaccel = 1;
- }
-#endif
- if (mirror)
- printk("radeonfb: mirroring display to CRT\n");
-
- /* framebuffer size */
- tmp = INREG(CONFIG_MEMSIZE);
-
- /* mem size is bits [28:0], mask off the rest */
- rinfo->video_ram = tmp & CONFIG_MEMSIZE_MASK;
-
- /* ram type */
- tmp = INREG(MEM_SDRAM_MODE_REG);
- switch ((MEM_CFG_TYPE & tmp) >> 30) {
- case 0:
- /* SDR SGRAM (2:1) */
- strcpy(rinfo->ram_type, "SDR SGRAM");
- rinfo->ram.ml = 4;
- rinfo->ram.mb = 4;
- rinfo->ram.trcd = 1;
- rinfo->ram.trp = 2;
- rinfo->ram.twr = 1;
- rinfo->ram.cl = 2;
- rinfo->ram.loop_latency = 16;
- rinfo->ram.rloop = 16;
-
- break;
- case 1:
- /* DDR SGRAM */
- strcpy(rinfo->ram_type, "DDR SGRAM");
- rinfo->ram.ml = 4;
- rinfo->ram.mb = 4;
- rinfo->ram.trcd = 3;
- rinfo->ram.trp = 3;
- rinfo->ram.twr = 2;
- rinfo->ram.cl = 3;
- rinfo->ram.tr2w = 1;
- rinfo->ram.loop_latency = 16;
- rinfo->ram.rloop = 16;
-
- break;
- default:
- /* 64-bit SDR SGRAM */
- strcpy(rinfo->ram_type, "SDR SGRAM 64");
- rinfo->ram.ml = 4;
- rinfo->ram.mb = 8;
- rinfo->ram.trcd = 3;
- rinfo->ram.trp = 3;
- rinfo->ram.twr = 1;
- rinfo->ram.cl = 3;
- rinfo->ram.tr2w = 1;
- rinfo->ram.loop_latency = 17;
- rinfo->ram.rloop = 17;
-
- break;
- }
-
- rinfo->bios_seg = radeon_find_rom(rinfo);
- radeon_get_pllinfo(rinfo, rinfo->bios_seg);
-
- /*
- * Hack to get around some busted production M6s
- * reporting no RAM
- */
- if (rinfo->video_ram == 0) {
- switch (pdev->device) {
- case PCI_DEVICE_ID_ATI_RADEON_LY:
- case PCI_DEVICE_ID_ATI_RADEON_LZ:
- rinfo->video_ram = 8192 * 1024;
- break;
- default:
- break;
- }
- }
-
-
- RTRACE("radeonfb: probed %s %dk videoram\n", (rinfo->ram_type), (rinfo->video_ram/1024));
-
-#if !defined(__powerpc__)
- radeon_get_moninfo(rinfo);
-#else
- switch (pdev->device) {
- case PCI_DEVICE_ID_ATI_RADEON_LW:
- case PCI_DEVICE_ID_ATI_RADEON_LX:
- case PCI_DEVICE_ID_ATI_RADEON_LY:
- case PCI_DEVICE_ID_ATI_RADEON_LZ:
- rinfo->dviDisp_type = MT_LCD;
- break;
- default:
- radeon_get_moninfo(rinfo);
- break;
- }
-#endif
-
- radeon_get_EDID(rinfo);
-
- if ((rinfo->dviDisp_type == MT_DFP) || (rinfo->dviDisp_type == MT_LCD) ||
- (rinfo->crtDisp_type == MT_DFP)) {
- if (!radeon_get_dfpinfo(rinfo)) {
- iounmap(rinfo->mmio_base);
- release_mem_region (rinfo->mmio_base_phys,
- pci_resource_len(pdev, 2));
- release_mem_region (rinfo->fb_base_phys,
- pci_resource_len(pdev, 0));
- kfree (rinfo);
- return -ENODEV;
- }
- }
-
- rinfo->fb_base = ioremap (rinfo->fb_base_phys, rinfo->video_ram);
- if (!rinfo->fb_base) {
- printk ("radeonfb: cannot map FB\n");
- iounmap(rinfo->mmio_base);
- release_mem_region (rinfo->mmio_base_phys,
- pci_resource_len(pdev, 2));
- release_mem_region (rinfo->fb_base_phys,
- pci_resource_len(pdev, 0));
- kfree (rinfo);
- return -ENODEV;
- }
-
- /* I SHOULD FIX THAT CRAP! I should probably mimic XFree DRI
- * driver setup here.
- *
- * On PPC, OF based cards setup the internal memory
- * mapping in strange ways. We change it so that the
- * framebuffer is mapped at 0 and given half of the card's
- * address space (2Gb). AGP is mapped high (0xe0000000) and
- * can use up to 512Mb. Once DRI is fully implemented, we
- * will have to set up the PCI remapper to remap the agp_special_page
- * memory page somewhere between those regions so that the card
- * can use a normal PCI bus master cycle to access the ring read ptr.
- * --BenH.
- */
-#ifdef CONFIG_ALL_PPC
- if (rinfo->hasCRTC2)
- OUTREG(CRTC2_GEN_CNTL,
- (INREG(CRTC2_GEN_CNTL) & ~CRTC2_EN) | CRTC2_DISP_REQ_EN_B);
- OUTREG(CRTC_EXT_CNTL, INREG(CRTC_EXT_CNTL) | CRTC_DISPLAY_DIS);
- OUTREG(MC_FB_LOCATION, 0x7fff0000);
- OUTREG(MC_AGP_LOCATION, 0xffffe000);
- OUTREG(DISPLAY_BASE_ADDR, 0x00000000);
- if (rinfo->hasCRTC2)
- OUTREG(CRTC2_DISPLAY_BASE_ADDR, 0x00000000);
- OUTREG(SRC_OFFSET, 0x00000000);
- OUTREG(DST_OFFSET, 0x00000000);
- mdelay(10);
- OUTREG(CRTC_EXT_CNTL, INREG(CRTC_EXT_CNTL) & ~CRTC_DISPLAY_DIS);
-#endif /* CONFIG_ALL_PPC */
-
- /* save current mode regs before we switch into the new one
- * so we can restore this upon __exit
- */
- radeon_save_state (rinfo, &rinfo->init_state);
-
- /* set all the vital stuff */
- radeon_set_fbinfo (rinfo);
-
- pci_set_drvdata(pdev, rinfo);
- rinfo->next = board_list;
- board_list = rinfo;
- ((struct fb_info *) rinfo)->device = &pdev->dev;
- if (register_framebuffer ((struct fb_info *) rinfo) < 0) {
- printk ("radeonfb: could not register framebuffer\n");
- iounmap(rinfo->fb_base);
- iounmap(rinfo->mmio_base);
- release_mem_region (rinfo->mmio_base_phys,
- pci_resource_len(pdev, 2));
- release_mem_region (rinfo->fb_base_phys,
- pci_resource_len(pdev, 0));
- kfree (rinfo);
- return -ENODEV;
- }
-
-#ifdef CONFIG_MTRR
- rinfo->mtrr_hdl = nomtrr ? -1 : mtrr_add(rinfo->fb_base_phys,
- rinfo->video_ram,
- MTRR_TYPE_WRCOMB, 1);
-#endif
-
-#ifdef CONFIG_PMAC_BACKLIGHT
- if (rinfo->dviDisp_type == MT_LCD)
- register_backlight_controller(&radeon_backlight_controller,
- rinfo, "ati");
-#endif
-
-#ifdef CONFIG_PMAC_PBOOK
- if (rinfo->dviDisp_type == MT_LCD) {
- rinfo->pm_reg = pci_find_capability(pdev, PCI_CAP_ID_PM);
- pmu_register_sleep_notifier(&radeon_sleep_notifier);
- }
-#endif
-
- printk ("radeonfb: ATI Radeon %s %s %d MB\n", rinfo->name, rinfo->ram_type,
- (rinfo->video_ram/(1024*1024)));
-
- if (rinfo->hasCRTC2) {
- printk("radeonfb: DVI port %s monitor connected\n",
- GET_MON_NAME(rinfo->dviDisp_type));
- printk("radeonfb: CRT port %s monitor connected\n",
- GET_MON_NAME(rinfo->crtDisp_type));
- } else {
- printk("radeonfb: CRT port %s monitor connected\n",
- GET_MON_NAME(rinfo->crtDisp_type));
- }
-
- RTRACE("radeonfb_pci_register END\n");
-
- return 0;
-}
-
-
-
-static void __devexit radeonfb_pci_unregister (struct pci_dev *pdev)
-{
- struct radeonfb_info *rinfo = pci_get_drvdata(pdev);
-
- if (!rinfo)
- return;
-
- /* restore original state
- *
- * Doesn't quite work yet, possibly because of the PPC hacking
- * I do on startup; disable for now. --BenH
- */
- radeon_write_mode (rinfo, &rinfo->init_state);
-
-#ifdef CONFIG_MTRR
- if (rinfo->mtrr_hdl >= 0)
- mtrr_del(rinfo->mtrr_hdl, 0, 0);
-#endif
-
- unregister_framebuffer ((struct fb_info *) rinfo);
-
- iounmap(rinfo->mmio_base);
- iounmap(rinfo->fb_base);
-
- release_mem_region (rinfo->mmio_base_phys,
- pci_resource_len(pdev, 2));
- release_mem_region (rinfo->fb_base_phys,
- pci_resource_len(pdev, 0));
-
- kfree (rinfo);
-}
-
-
-static struct pci_driver radeonfb_driver = {
- .name = "radeonfb",
- .id_table = radeonfb_pci_table,
- .probe = radeonfb_pci_register,
- .remove = __devexit_p(radeonfb_pci_unregister),
-};
-
-#ifndef MODULE
-static int __init radeonfb_old_setup (char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep (&options, ",")) != NULL) {
- if (!*this_opt)
- continue;
- if (!strncmp(this_opt, "noaccel", 7)) {
- noaccel = 1;
- } else if (!strncmp(this_opt, "mirror", 6)) {
- mirror = 1;
- } else if (!strncmp(this_opt, "dfp", 3)) {
- force_dfp = 1;
- } else if (!strncmp(this_opt, "panel_yres:", 11)) {
- panel_yres = simple_strtoul((this_opt+11), NULL, 0);
- } else if (!strncmp(this_opt, "nomtrr", 6)) {
- nomtrr = 1;
- } else
- mode_option = this_opt;
- }
-
- return 0;
-}
-#endif /* MODULE */
-
-static int __init radeonfb_old_init (void)
-{
-#ifndef MODULE
- char *option = NULL;
-
- if (fb_get_options("radeonfb_old", &option))
- return -ENODEV;
- radeonfb_old_setup(option);
-#endif
- return pci_register_driver (&radeonfb_driver);
-}
-
-
-static void __exit radeonfb_old_exit (void)
-{
- pci_unregister_driver (&radeonfb_driver);
-}
-
-module_init(radeonfb_old_init);
-module_exit(radeonfb_old_exit);
-
-
-MODULE_AUTHOR("Ani Joshi");
-MODULE_DESCRIPTION("framebuffer driver for ATI Radeon chipset");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index 8d5f35676f9a5..4a292aae6eb26 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -1378,7 +1378,7 @@ stifb_setup(char *options)
int i;
if (!options || !*options)
- return 0;
+ return 1;
if (strncmp(options, "off", 3) == 0) {
stifb_disabled = 1;
@@ -1393,7 +1393,7 @@ stifb_setup(char *options)
stifb_bpp_pref[i] = simple_strtoul(options, &options, 10);
}
}
- return 0;
+ return 1;
}
__setup("stifb=", stifb_setup);
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index f6e24ee85f077..5fc86ea20692f 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -4,8 +4,9 @@
* Frame Buffer Device for ATI Imageon w100 (Wallaby)
*
* Copyright (C) 2002, ATI Corp.
- * Copyright (C) 2004-2005 Richard Purdie
+ * Copyright (C) 2004-2006 Richard Purdie
* Copyright (c) 2005 Ian Molton
+ * Copyright (c) 2006 Alberto Mardegan
*
* Rewritten for 2.6 by Richard Purdie <rpurdie@rpsys.net>
*
@@ -14,6 +15,9 @@
*
* w32xx support by Ian Molton
*
+ * Hardware acceleration support by Alberto Mardegan
+ * <mardy@users.sourceforge.net>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -47,6 +51,7 @@ static void w100_set_dispregs(struct w100fb_par*);
static void w100_update_enable(void);
static void w100_update_disable(void);
static void calc_hsync(struct w100fb_par *par);
+static void w100_init_graphic_engine(struct w100fb_par *par);
struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
/* Pseudo palette size */
@@ -248,6 +253,152 @@ static int w100fb_blank(int blank_mode, struct fb_info *info)
}
+static void w100_fifo_wait(int entries)
+{
+ union rbbm_status_u status;
+ int i;
+
+ for (i = 0; i < 2000000; i++) {
+ status.val = readl(remapped_regs + mmRBBM_STATUS);
+ if (status.f.cmdfifo_avail >= entries)
+ return;
+ udelay(1);
+ }
+ printk(KERN_ERR "w100fb: FIFO Timeout!\n");
+}
+
+
+static int w100fb_sync(struct fb_info *info)
+{
+ union rbbm_status_u status;
+ int i;
+
+ for (i = 0; i < 2000000; i++) {
+ status.val = readl(remapped_regs + mmRBBM_STATUS);
+ if (!status.f.gui_active)
+ return 0;
+ udelay(1);
+ }
+ printk(KERN_ERR "w100fb: Graphic engine timeout!\n");
+ return -EBUSY;
+}
+
+
+static void w100_init_graphic_engine(struct w100fb_par *par)
+{
+ union dp_gui_master_cntl_u gmc;
+ union dp_mix_u dp_mix;
+ union dp_datatype_u dp_datatype;
+ union dp_cntl_u dp_cntl;
+
+ w100_fifo_wait(4);
+ writel(W100_FB_BASE, remapped_regs + mmDST_OFFSET);
+ writel(par->xres, remapped_regs + mmDST_PITCH);
+ writel(W100_FB_BASE, remapped_regs + mmSRC_OFFSET);
+ writel(par->xres, remapped_regs + mmSRC_PITCH);
+
+ w100_fifo_wait(3);
+ writel(0, remapped_regs + mmSC_TOP_LEFT);
+ writel((par->yres << 16) | par->xres, remapped_regs + mmSC_BOTTOM_RIGHT);
+ writel(0x1fff1fff, remapped_regs + mmSRC_SC_BOTTOM_RIGHT);
+
+ w100_fifo_wait(4);
+ dp_cntl.val = 0;
+ dp_cntl.f.dst_x_dir = 1;
+ dp_cntl.f.dst_y_dir = 1;
+ dp_cntl.f.src_x_dir = 1;
+ dp_cntl.f.src_y_dir = 1;
+ dp_cntl.f.dst_major_x = 1;
+ dp_cntl.f.src_major_x = 1;
+ writel(dp_cntl.val, remapped_regs + mmDP_CNTL);
+
+ gmc.val = 0;
+ gmc.f.gmc_src_pitch_offset_cntl = 1;
+ gmc.f.gmc_dst_pitch_offset_cntl = 1;
+ gmc.f.gmc_src_clipping = 1;
+ gmc.f.gmc_dst_clipping = 1;
+ gmc.f.gmc_brush_datatype = GMC_BRUSH_NONE;
+ gmc.f.gmc_dst_datatype = 3; /* from DstType_16Bpp_444 */
+ gmc.f.gmc_src_datatype = SRC_DATATYPE_EQU_DST;
+ gmc.f.gmc_byte_pix_order = 1;
+ gmc.f.gmc_default_sel = 0;
+ gmc.f.gmc_rop3 = ROP3_SRCCOPY;
+ gmc.f.gmc_dp_src_source = DP_SRC_MEM_RECTANGULAR;
+ gmc.f.gmc_clr_cmp_fcn_dis = 1;
+ gmc.f.gmc_wr_msk_dis = 1;
+ gmc.f.gmc_dp_op = DP_OP_ROP;
+ writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
+
+ dp_datatype.val = dp_mix.val = 0;
+ dp_datatype.f.dp_dst_datatype = gmc.f.gmc_dst_datatype;
+ dp_datatype.f.dp_brush_datatype = gmc.f.gmc_brush_datatype;
+ dp_datatype.f.dp_src2_type = 0;
+ dp_datatype.f.dp_src2_datatype = gmc.f.gmc_src_datatype;
+ dp_datatype.f.dp_src_datatype = gmc.f.gmc_src_datatype;
+ dp_datatype.f.dp_byte_pix_order = gmc.f.gmc_byte_pix_order;
+ writel(dp_datatype.val, remapped_regs + mmDP_DATATYPE);
+
+ dp_mix.f.dp_src_source = gmc.f.gmc_dp_src_source;
+ dp_mix.f.dp_src2_source = 1;
+ dp_mix.f.dp_rop3 = gmc.f.gmc_rop3;
+ dp_mix.f.dp_op = gmc.f.gmc_dp_op;
+ writel(dp_mix.val, remapped_regs + mmDP_MIX);
+}
+
+
+static void w100fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ union dp_gui_master_cntl_u gmc;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_fillrect(info, rect);
+ return;
+ }
+
+ gmc.val = readl(remapped_regs + mmDP_GUI_MASTER_CNTL);
+ gmc.f.gmc_rop3 = ROP3_PATCOPY;
+ gmc.f.gmc_brush_datatype = GMC_BRUSH_SOLID_COLOR;
+ w100_fifo_wait(2);
+ writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
+ writel(rect->color, remapped_regs + mmDP_BRUSH_FRGD_CLR);
+
+ w100_fifo_wait(2);
+ writel((rect->dy << 16) | (rect->dx & 0xffff), remapped_regs + mmDST_Y_X);
+ writel((rect->width << 16) | (rect->height & 0xffff),
+ remapped_regs + mmDST_WIDTH_HEIGHT);
+}
+
+
+static void w100fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
+ u32 h = area->height, w = area->width;
+ union dp_gui_master_cntl_u gmc;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_copyarea(info, area);
+ return;
+ }
+
+ gmc.val = readl(remapped_regs + mmDP_GUI_MASTER_CNTL);
+ gmc.f.gmc_rop3 = ROP3_SRCCOPY;
+ gmc.f.gmc_brush_datatype = GMC_BRUSH_NONE;
+ w100_fifo_wait(1);
+ writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
+
+ w100_fifo_wait(3);
+ writel((sy << 16) | (sx & 0xffff), remapped_regs + mmSRC_Y_X);
+ writel((dy << 16) | (dx & 0xffff), remapped_regs + mmDST_Y_X);
+ writel((w << 16) | (h & 0xffff), remapped_regs + mmDST_WIDTH_HEIGHT);
+}
+
+
/*
* Change the resolution by calling the appropriate hardware functions
*/
@@ -265,6 +416,7 @@ static void w100fb_activate_var(struct w100fb_par *par)
w100_init_lcd(par);
w100_set_dispregs(par);
w100_update_enable();
+ w100_init_graphic_engine(par);
calc_hsync(par);
@@ -394,9 +546,10 @@ static struct fb_ops w100fb_ops = {
.fb_set_par = w100fb_set_par,
.fb_setcolreg = w100fb_setcolreg,
.fb_blank = w100fb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
+ .fb_fillrect = w100fb_fillrect,
+ .fb_copyarea = w100fb_copyarea,
.fb_imageblit = cfb_imageblit,
+ .fb_sync = w100fb_sync,
};
#ifdef CONFIG_PM
@@ -543,7 +696,8 @@ int __init w100fb_probe(struct platform_device *pdev)
}
info->fbops = &w100fb_ops;
- info->flags = FBINFO_DEFAULT;
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT;
info->node = -1;
info->screen_base = remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE);
info->screen_size = REMAPPED_FB_LEN;
diff --git a/drivers/video/w100fb.h b/drivers/video/w100fb.h
index 7a58a1e3e427c..fffae7b4f6e98 100644
--- a/drivers/video/w100fb.h
+++ b/drivers/video/w100fb.h
@@ -122,15 +122,32 @@
/* Block DISPLAY End: */
/* Block GFX Start: */
+#define mmDST_OFFSET 0x1004
+#define mmDST_PITCH 0x1008
+#define mmDST_Y_X 0x1038
+#define mmDST_WIDTH_HEIGHT 0x1198
+#define mmDP_GUI_MASTER_CNTL 0x106C
#define mmBRUSH_OFFSET 0x108C
#define mmBRUSH_Y_X 0x1074
+#define mmDP_BRUSH_FRGD_CLR 0x107C
+#define mmSRC_OFFSET 0x11AC
+#define mmSRC_PITCH 0x11B0
+#define mmSRC_Y_X 0x1034
#define mmDEFAULT_PITCH_OFFSET 0x10A0
#define mmDEFAULT_SC_BOTTOM_RIGHT 0x10A8
#define mmDEFAULT2_SC_BOTTOM_RIGHT 0x10AC
+#define mmSC_TOP_LEFT 0x11BC
+#define mmSC_BOTTOM_RIGHT 0x11C0
+#define mmSRC_SC_BOTTOM_RIGHT 0x11C4
#define mmGLOBAL_ALPHA 0x1210
#define mmFILTER_COEF 0x1214
#define mmMVC_CNTL_START 0x11E0
#define mmE2_ARITHMETIC_CNTL 0x1220
+#define mmDP_CNTL 0x11C8
+#define mmDP_CNTL_DST_DIR 0x11CC
+#define mmDP_DATATYPE 0x12C4
+#define mmDP_MIX 0x12C8
+#define mmDP_WRITE_MSK 0x12CC
#define mmENG_CNTL 0x13E8
#define mmENG_PERF_CNT 0x13F0
/* Block GFX End: */
@@ -179,6 +196,7 @@
/* Block RBBM Start: */
#define mmWAIT_UNTIL 0x1400
#define mmISYNC_CNTL 0x1404
+#define mmRBBM_STATUS 0x0140
#define mmRBBM_CNTL 0x0144
#define mmNQWAIT_UNTIL 0x0150
/* Block RBBM End: */
@@ -225,147 +243,147 @@
/* Register structure definitions */
struct wrap_top_dir_t {
- unsigned long top_addr : 23;
- unsigned long : 9;
+ u32 top_addr : 23;
+ u32 : 9;
} __attribute__((packed));
union wrap_top_dir_u {
- unsigned long val : 32;
+ u32 val : 32;
struct wrap_top_dir_t f;
} __attribute__((packed));
struct wrap_start_dir_t {
- unsigned long start_addr : 23;
- unsigned long : 9;
+ u32 start_addr : 23;
+ u32 : 9;
} __attribute__((packed));
union wrap_start_dir_u {
- unsigned long val : 32;
+ u32 val : 32;
struct wrap_start_dir_t f;
} __attribute__((packed));
struct cif_cntl_t {
- unsigned long swap_reg : 2;
- unsigned long swap_fbuf_1 : 2;
- unsigned long swap_fbuf_2 : 2;
- unsigned long swap_fbuf_3 : 2;
- unsigned long pmi_int_disable : 1;
- unsigned long pmi_schmen_disable : 1;
- unsigned long intb_oe : 1;
- unsigned long en_wait_to_compensate_dq_prop_dly : 1;
- unsigned long compensate_wait_rd_size : 2;
- unsigned long wait_asserted_timeout_val : 2;
- unsigned long wait_masked_val : 2;
- unsigned long en_wait_timeout : 1;
- unsigned long en_one_clk_setup_before_wait : 1;
- unsigned long interrupt_active_high : 1;
- unsigned long en_overwrite_straps : 1;
- unsigned long strap_wait_active_hi : 1;
- unsigned long lat_busy_count : 2;
- unsigned long lat_rd_pm4_sclk_busy : 1;
- unsigned long dis_system_bits : 1;
- unsigned long dis_mr : 1;
- unsigned long cif_spare_1 : 4;
+ u32 swap_reg : 2;
+ u32 swap_fbuf_1 : 2;
+ u32 swap_fbuf_2 : 2;
+ u32 swap_fbuf_3 : 2;
+ u32 pmi_int_disable : 1;
+ u32 pmi_schmen_disable : 1;
+ u32 intb_oe : 1;
+ u32 en_wait_to_compensate_dq_prop_dly : 1;
+ u32 compensate_wait_rd_size : 2;
+ u32 wait_asserted_timeout_val : 2;
+ u32 wait_masked_val : 2;
+ u32 en_wait_timeout : 1;
+ u32 en_one_clk_setup_before_wait : 1;
+ u32 interrupt_active_high : 1;
+ u32 en_overwrite_straps : 1;
+ u32 strap_wait_active_hi : 1;
+ u32 lat_busy_count : 2;
+ u32 lat_rd_pm4_sclk_busy : 1;
+ u32 dis_system_bits : 1;
+ u32 dis_mr : 1;
+ u32 cif_spare_1 : 4;
} __attribute__((packed));
union cif_cntl_u {
- unsigned long val : 32;
+ u32 val : 32;
struct cif_cntl_t f;
} __attribute__((packed));
struct cfgreg_base_t {
- unsigned long cfgreg_base : 24;
- unsigned long : 8;
+ u32 cfgreg_base : 24;
+ u32 : 8;
} __attribute__((packed));
union cfgreg_base_u {
- unsigned long val : 32;
+ u32 val : 32;
struct cfgreg_base_t f;
} __attribute__((packed));
struct cif_io_t {
- unsigned long dq_srp : 1;
- unsigned long dq_srn : 1;
- unsigned long dq_sp : 4;
- unsigned long dq_sn : 4;
- unsigned long waitb_srp : 1;
- unsigned long waitb_srn : 1;
- unsigned long waitb_sp : 4;
- unsigned long waitb_sn : 4;
- unsigned long intb_srp : 1;
- unsigned long intb_srn : 1;
- unsigned long intb_sp : 4;
- unsigned long intb_sn : 4;
- unsigned long : 2;
+ u32 dq_srp : 1;
+ u32 dq_srn : 1;
+ u32 dq_sp : 4;
+ u32 dq_sn : 4;
+ u32 waitb_srp : 1;
+ u32 waitb_srn : 1;
+ u32 waitb_sp : 4;
+ u32 waitb_sn : 4;
+ u32 intb_srp : 1;
+ u32 intb_srn : 1;
+ u32 intb_sp : 4;
+ u32 intb_sn : 4;
+ u32 : 2;
} __attribute__((packed));
union cif_io_u {
- unsigned long val : 32;
+ u32 val : 32;
struct cif_io_t f;
} __attribute__((packed));
struct cif_read_dbg_t {
- unsigned long unpacker_pre_fetch_trig_gen : 2;
- unsigned long dly_second_rd_fetch_trig : 1;
- unsigned long rst_rd_burst_id : 1;
- unsigned long dis_rd_burst_id : 1;
- unsigned long en_block_rd_when_packer_is_not_emp : 1;
- unsigned long dis_pre_fetch_cntl_sm : 1;
- unsigned long rbbm_chrncy_dis : 1;
- unsigned long rbbm_rd_after_wr_lat : 2;
- unsigned long dis_be_during_rd : 1;
- unsigned long one_clk_invalidate_pulse : 1;
- unsigned long dis_chnl_priority : 1;
- unsigned long rst_read_path_a_pls : 1;
- unsigned long rst_read_path_b_pls : 1;
- unsigned long dis_reg_rd_fetch_trig : 1;
- unsigned long dis_rd_fetch_trig_from_ind_addr : 1;
- unsigned long dis_rd_same_byte_to_trig_fetch : 1;
- unsigned long dis_dir_wrap : 1;
- unsigned long dis_ring_buf_to_force_dec : 1;
- unsigned long dis_addr_comp_in_16bit : 1;
- unsigned long clr_w : 1;
- unsigned long err_rd_tag_is_3 : 1;
- unsigned long err_load_when_ful_a : 1;
- unsigned long err_load_when_ful_b : 1;
- unsigned long : 7;
+ u32 unpacker_pre_fetch_trig_gen : 2;
+ u32 dly_second_rd_fetch_trig : 1;
+ u32 rst_rd_burst_id : 1;
+ u32 dis_rd_burst_id : 1;
+ u32 en_block_rd_when_packer_is_not_emp : 1;
+ u32 dis_pre_fetch_cntl_sm : 1;
+ u32 rbbm_chrncy_dis : 1;
+ u32 rbbm_rd_after_wr_lat : 2;
+ u32 dis_be_during_rd : 1;
+ u32 one_clk_invalidate_pulse : 1;
+ u32 dis_chnl_priority : 1;
+ u32 rst_read_path_a_pls : 1;
+ u32 rst_read_path_b_pls : 1;
+ u32 dis_reg_rd_fetch_trig : 1;
+ u32 dis_rd_fetch_trig_from_ind_addr : 1;
+ u32 dis_rd_same_byte_to_trig_fetch : 1;
+ u32 dis_dir_wrap : 1;
+ u32 dis_ring_buf_to_force_dec : 1;
+ u32 dis_addr_comp_in_16bit : 1;
+ u32 clr_w : 1;
+ u32 err_rd_tag_is_3 : 1;
+ u32 err_load_when_ful_a : 1;
+ u32 err_load_when_ful_b : 1;
+ u32 : 7;
} __attribute__((packed));
union cif_read_dbg_u {
- unsigned long val : 32;
+ u32 val : 32;
struct cif_read_dbg_t f;
} __attribute__((packed));
struct cif_write_dbg_t {
- unsigned long packer_timeout_count : 2;
- unsigned long en_upper_load_cond : 1;
- unsigned long en_chnl_change_cond : 1;
- unsigned long dis_addr_comp_cond : 1;
- unsigned long dis_load_same_byte_addr_cond : 1;
- unsigned long dis_timeout_cond : 1;
- unsigned long dis_timeout_during_rbbm : 1;
- unsigned long dis_packer_ful_during_rbbm_timeout : 1;
- unsigned long en_dword_split_to_rbbm : 1;
- unsigned long en_dummy_val : 1;
- unsigned long dummy_val_sel : 1;
- unsigned long mask_pm4_wrptr_dec : 1;
- unsigned long dis_mc_clean_cond : 1;
- unsigned long err_two_reqi_during_ful : 1;
- unsigned long err_reqi_during_idle_clk : 1;
- unsigned long err_global : 1;
- unsigned long en_wr_buf_dbg_load : 1;
- unsigned long en_wr_buf_dbg_path : 1;
- unsigned long sel_wr_buf_byte : 3;
- unsigned long dis_rd_flush_wr : 1;
- unsigned long dis_packer_ful_cond : 1;
- unsigned long dis_invalidate_by_ops_chnl : 1;
- unsigned long en_halt_when_reqi_err : 1;
- unsigned long cif_spare_2 : 5;
- unsigned long : 1;
+ u32 packer_timeout_count : 2;
+ u32 en_upper_load_cond : 1;
+ u32 en_chnl_change_cond : 1;
+ u32 dis_addr_comp_cond : 1;
+ u32 dis_load_same_byte_addr_cond : 1;
+ u32 dis_timeout_cond : 1;
+ u32 dis_timeout_during_rbbm : 1;
+ u32 dis_packer_ful_during_rbbm_timeout : 1;
+ u32 en_dword_split_to_rbbm : 1;
+ u32 en_dummy_val : 1;
+ u32 dummy_val_sel : 1;
+ u32 mask_pm4_wrptr_dec : 1;
+ u32 dis_mc_clean_cond : 1;
+ u32 err_two_reqi_during_ful : 1;
+ u32 err_reqi_during_idle_clk : 1;
+ u32 err_global : 1;
+ u32 en_wr_buf_dbg_load : 1;
+ u32 en_wr_buf_dbg_path : 1;
+ u32 sel_wr_buf_byte : 3;
+ u32 dis_rd_flush_wr : 1;
+ u32 dis_packer_ful_cond : 1;
+ u32 dis_invalidate_by_ops_chnl : 1;
+ u32 en_halt_when_reqi_err : 1;
+ u32 cif_spare_2 : 5;
+ u32 : 1;
} __attribute__((packed));
union cif_write_dbg_u {
- unsigned long val : 32;
+ u32 val : 32;
struct cif_write_dbg_t f;
} __attribute__((packed));
@@ -403,327 +421,327 @@ union cpu_defaults_u {
} __attribute__((packed));
struct crtc_total_t {
- unsigned long crtc_h_total : 10;
- unsigned long : 6;
- unsigned long crtc_v_total : 10;
- unsigned long : 6;
+ u32 crtc_h_total : 10;
+ u32 : 6;
+ u32 crtc_v_total : 10;
+ u32 : 6;
} __attribute__((packed));
union crtc_total_u {
- unsigned long val : 32;
+ u32 val : 32;
struct crtc_total_t f;
} __attribute__((packed));
struct crtc_ss_t {
- unsigned long ss_start : 10;
- unsigned long : 6;
- unsigned long ss_end : 10;
- unsigned long : 2;
- unsigned long ss_align : 1;
- unsigned long ss_pol : 1;
- unsigned long ss_run_mode : 1;
- unsigned long ss_en : 1;
+ u32 ss_start : 10;
+ u32 : 6;
+ u32 ss_end : 10;
+ u32 : 2;
+ u32 ss_align : 1;
+ u32 ss_pol : 1;
+ u32 ss_run_mode : 1;
+ u32 ss_en : 1;
} __attribute__((packed));
union crtc_ss_u {
- unsigned long val : 32;
+ u32 val : 32;
struct crtc_ss_t f;
} __attribute__((packed));
struct active_h_disp_t {
- unsigned long active_h_start : 10;
- unsigned long : 6;
- unsigned long active_h_end : 10;
- unsigned long : 6;
+ u32 active_h_start : 10;
+ u32 : 6;
+ u32 active_h_end : 10;
+ u32 : 6;
} __attribute__((packed));
union active_h_disp_u {
- unsigned long val : 32;
+ u32 val : 32;
struct active_h_disp_t f;
} __attribute__((packed));
struct active_v_disp_t {
- unsigned long active_v_start : 10;
- unsigned long : 6;
- unsigned long active_v_end : 10;
- unsigned long : 6;
+ u32 active_v_start : 10;
+ u32 : 6;
+ u32 active_v_end : 10;
+ u32 : 6;
} __attribute__((packed));
union active_v_disp_u {
- unsigned long val : 32;
+ u32 val : 32;
struct active_v_disp_t f;
} __attribute__((packed));
struct graphic_h_disp_t {
- unsigned long graphic_h_start : 10;
- unsigned long : 6;
- unsigned long graphic_h_end : 10;
- unsigned long : 6;
+ u32 graphic_h_start : 10;
+ u32 : 6;
+ u32 graphic_h_end : 10;
+ u32 : 6;
} __attribute__((packed));
union graphic_h_disp_u {
- unsigned long val : 32;
+ u32 val : 32;
struct graphic_h_disp_t f;
} __attribute__((packed));
struct graphic_v_disp_t {
- unsigned long graphic_v_start : 10;
- unsigned long : 6;
- unsigned long graphic_v_end : 10;
- unsigned long : 6;
+ u32 graphic_v_start : 10;
+ u32 : 6;
+ u32 graphic_v_end : 10;
+ u32 : 6;
} __attribute__((packed));
union graphic_v_disp_u{
- unsigned long val : 32;
+ u32 val : 32;
struct graphic_v_disp_t f;
} __attribute__((packed));
struct graphic_ctrl_t_w100 {
- unsigned long color_depth : 3;
- unsigned long portrait_mode : 2;
- unsigned long low_power_on : 1;
- unsigned long req_freq : 4;
- unsigned long en_crtc : 1;
- unsigned long en_graphic_req : 1;
- unsigned long en_graphic_crtc : 1;
- unsigned long total_req_graphic : 9;
- unsigned long lcd_pclk_on : 1;
- unsigned long lcd_sclk_on : 1;
- unsigned long pclk_running : 1;
- unsigned long sclk_running : 1;
- unsigned long : 6;
+ u32 color_depth : 3;
+ u32 portrait_mode : 2;
+ u32 low_power_on : 1;
+ u32 req_freq : 4;
+ u32 en_crtc : 1;
+ u32 en_graphic_req : 1;
+ u32 en_graphic_crtc : 1;
+ u32 total_req_graphic : 9;
+ u32 lcd_pclk_on : 1;
+ u32 lcd_sclk_on : 1;
+ u32 pclk_running : 1;
+ u32 sclk_running : 1;
+ u32 : 6;
} __attribute__((packed));
struct graphic_ctrl_t_w32xx {
- unsigned long color_depth : 3;
- unsigned long portrait_mode : 2;
- unsigned long low_power_on : 1;
- unsigned long req_freq : 4;
- unsigned long en_crtc : 1;
- unsigned long en_graphic_req : 1;
- unsigned long en_graphic_crtc : 1;
- unsigned long total_req_graphic : 10;
- unsigned long lcd_pclk_on : 1;
- unsigned long lcd_sclk_on : 1;
- unsigned long pclk_running : 1;
- unsigned long sclk_running : 1;
- unsigned long : 5;
+ u32 color_depth : 3;
+ u32 portrait_mode : 2;
+ u32 low_power_on : 1;
+ u32 req_freq : 4;
+ u32 en_crtc : 1;
+ u32 en_graphic_req : 1;
+ u32 en_graphic_crtc : 1;
+ u32 total_req_graphic : 10;
+ u32 lcd_pclk_on : 1;
+ u32 lcd_sclk_on : 1;
+ u32 pclk_running : 1;
+ u32 sclk_running : 1;
+ u32 : 5;
} __attribute__((packed));
union graphic_ctrl_u {
- unsigned long val : 32;
+ u32 val : 32;
struct graphic_ctrl_t_w100 f_w100;
struct graphic_ctrl_t_w32xx f_w32xx;
} __attribute__((packed));
struct video_ctrl_t {
- unsigned long video_mode : 1;
- unsigned long keyer_en : 1;
- unsigned long en_video_req : 1;
- unsigned long en_graphic_req_video : 1;
- unsigned long en_video_crtc : 1;
- unsigned long video_hor_exp : 2;
- unsigned long video_ver_exp : 2;
- unsigned long uv_combine : 1;
- unsigned long total_req_video : 9;
- unsigned long video_ch_sel : 1;
- unsigned long video_portrait : 2;
- unsigned long yuv2rgb_en : 1;
- unsigned long yuv2rgb_option : 1;
- unsigned long video_inv_hor : 1;
- unsigned long video_inv_ver : 1;
- unsigned long gamma_sel : 2;
- unsigned long dis_limit : 1;
- unsigned long en_uv_hblend : 1;
- unsigned long rgb_gamma_sel : 2;
+ u32 video_mode : 1;
+ u32 keyer_en : 1;
+ u32 en_video_req : 1;
+ u32 en_graphic_req_video : 1;
+ u32 en_video_crtc : 1;
+ u32 video_hor_exp : 2;
+ u32 video_ver_exp : 2;
+ u32 uv_combine : 1;
+ u32 total_req_video : 9;
+ u32 video_ch_sel : 1;
+ u32 video_portrait : 2;
+ u32 yuv2rgb_en : 1;
+ u32 yuv2rgb_option : 1;
+ u32 video_inv_hor : 1;
+ u32 video_inv_ver : 1;
+ u32 gamma_sel : 2;
+ u32 dis_limit : 1;
+ u32 en_uv_hblend : 1;
+ u32 rgb_gamma_sel : 2;
} __attribute__((packed));
union video_ctrl_u {
- unsigned long val : 32;
+ u32 val : 32;
struct video_ctrl_t f;
} __attribute__((packed));
struct disp_db_buf_cntl_rd_t {
- unsigned long en_db_buf : 1;
- unsigned long update_db_buf_done : 1;
- unsigned long db_buf_cntl : 6;
- unsigned long : 24;
+ u32 en_db_buf : 1;
+ u32 update_db_buf_done : 1;
+ u32 db_buf_cntl : 6;
+ u32 : 24;
} __attribute__((packed));
union disp_db_buf_cntl_rd_u {
- unsigned long val : 32;
+ u32 val : 32;
struct disp_db_buf_cntl_rd_t f;
} __attribute__((packed));
struct disp_db_buf_cntl_wr_t {
- unsigned long en_db_buf : 1;
- unsigned long update_db_buf : 1;
- unsigned long db_buf_cntl : 6;
- unsigned long : 24;
+ u32 en_db_buf : 1;
+ u32 update_db_buf : 1;
+ u32 db_buf_cntl : 6;
+ u32 : 24;
} __attribute__((packed));
union disp_db_buf_cntl_wr_u {
- unsigned long val : 32;
+ u32 val : 32;
struct disp_db_buf_cntl_wr_t f;
} __attribute__((packed));
struct gamma_value1_t {
- unsigned long gamma1 : 8;
- unsigned long gamma2 : 8;
- unsigned long gamma3 : 8;
- unsigned long gamma4 : 8;
+ u32 gamma1 : 8;
+ u32 gamma2 : 8;
+ u32 gamma3 : 8;
+ u32 gamma4 : 8;
} __attribute__((packed));
union gamma_value1_u {
- unsigned long val : 32;
+ u32 val : 32;
struct gamma_value1_t f;
} __attribute__((packed));
struct gamma_value2_t {
- unsigned long gamma5 : 8;
- unsigned long gamma6 : 8;
- unsigned long gamma7 : 8;
- unsigned long gamma8 : 8;
+ u32 gamma5 : 8;
+ u32 gamma6 : 8;
+ u32 gamma7 : 8;
+ u32 gamma8 : 8;
} __attribute__((packed));
union gamma_value2_u {
- unsigned long val : 32;
+ u32 val : 32;
struct gamma_value2_t f;
} __attribute__((packed));
struct gamma_slope_t {
- unsigned long slope1 : 3;
- unsigned long slope2 : 3;
- unsigned long slope3 : 3;
- unsigned long slope4 : 3;
- unsigned long slope5 : 3;
- unsigned long slope6 : 3;
- unsigned long slope7 : 3;
- unsigned long slope8 : 3;
- unsigned long : 8;
+ u32 slope1 : 3;
+ u32 slope2 : 3;
+ u32 slope3 : 3;
+ u32 slope4 : 3;
+ u32 slope5 : 3;
+ u32 slope6 : 3;
+ u32 slope7 : 3;
+ u32 slope8 : 3;
+ u32 : 8;
} __attribute__((packed));
union gamma_slope_u {
- unsigned long val : 32;
+ u32 val : 32;
struct gamma_slope_t f;
} __attribute__((packed));
struct mc_ext_mem_location_t {
- unsigned long mc_ext_mem_start : 16;
- unsigned long mc_ext_mem_top : 16;
+ u32 mc_ext_mem_start : 16;
+ u32 mc_ext_mem_top : 16;
} __attribute__((packed));
union mc_ext_mem_location_u {
- unsigned long val : 32;
+ u32 val : 32;
struct mc_ext_mem_location_t f;
} __attribute__((packed));
struct mc_fb_location_t {
- unsigned long mc_fb_start : 16;
- unsigned long mc_fb_top : 16;
+ u32 mc_fb_start : 16;
+ u32 mc_fb_top : 16;
} __attribute__((packed));
union mc_fb_location_u {
- unsigned long val : 32;
+ u32 val : 32;
struct mc_fb_location_t f;
} __attribute__((packed));
struct clk_pin_cntl_t {
- unsigned long osc_en : 1;
- unsigned long osc_gain : 5;
- unsigned long dont_use_xtalin : 1;
- unsigned long xtalin_pm_en : 1;
- unsigned long xtalin_dbl_en : 1;
- unsigned long : 7;
- unsigned long cg_debug : 16;
+ u32 osc_en : 1;
+ u32 osc_gain : 5;
+ u32 dont_use_xtalin : 1;
+ u32 xtalin_pm_en : 1;
+ u32 xtalin_dbl_en : 1;
+ u32 : 7;
+ u32 cg_debug : 16;
} __attribute__((packed));
union clk_pin_cntl_u {
- unsigned long val : 32;
+ u32 val : 32;
struct clk_pin_cntl_t f;
} __attribute__((packed));
struct pll_ref_fb_div_t {
- unsigned long pll_ref_div : 4;
- unsigned long : 4;
- unsigned long pll_fb_div_int : 6;
- unsigned long : 2;
- unsigned long pll_fb_div_frac : 3;
- unsigned long : 1;
- unsigned long pll_reset_time : 4;
- unsigned long pll_lock_time : 8;
+ u32 pll_ref_div : 4;
+ u32 : 4;
+ u32 pll_fb_div_int : 6;
+ u32 : 2;
+ u32 pll_fb_div_frac : 3;
+ u32 : 1;
+ u32 pll_reset_time : 4;
+ u32 pll_lock_time : 8;
} __attribute__((packed));
union pll_ref_fb_div_u {
- unsigned long val : 32;
+ u32 val : 32;
struct pll_ref_fb_div_t f;
} __attribute__((packed));
struct pll_cntl_t {
- unsigned long pll_pwdn : 1;
- unsigned long pll_reset : 1;
- unsigned long pll_pm_en : 1;
- unsigned long pll_mode : 1;
- unsigned long pll_refclk_sel : 1;
- unsigned long pll_fbclk_sel : 1;
- unsigned long pll_tcpoff : 1;
- unsigned long pll_pcp : 3;
- unsigned long pll_pvg : 3;
- unsigned long pll_vcofr : 1;
- unsigned long pll_ioffset : 2;
- unsigned long pll_pecc_mode : 2;
- unsigned long pll_pecc_scon : 2;
- unsigned long pll_dactal : 4;
- unsigned long pll_cp_clip : 2;
- unsigned long pll_conf : 3;
- unsigned long pll_mbctrl : 2;
- unsigned long pll_ring_off : 1;
+ u32 pll_pwdn : 1;
+ u32 pll_reset : 1;
+ u32 pll_pm_en : 1;
+ u32 pll_mode : 1;
+ u32 pll_refclk_sel : 1;
+ u32 pll_fbclk_sel : 1;
+ u32 pll_tcpoff : 1;
+ u32 pll_pcp : 3;
+ u32 pll_pvg : 3;
+ u32 pll_vcofr : 1;
+ u32 pll_ioffset : 2;
+ u32 pll_pecc_mode : 2;
+ u32 pll_pecc_scon : 2;
+ u32 pll_dactal : 4;
+ u32 pll_cp_clip : 2;
+ u32 pll_conf : 3;
+ u32 pll_mbctrl : 2;
+ u32 pll_ring_off : 1;
} __attribute__((packed));
union pll_cntl_u {
- unsigned long val : 32;
+ u32 val : 32;
struct pll_cntl_t f;
} __attribute__((packed));
struct sclk_cntl_t {
- unsigned long sclk_src_sel : 2;
- unsigned long : 2;
- unsigned long sclk_post_div_fast : 4;
- unsigned long sclk_clkon_hys : 3;
- unsigned long sclk_post_div_slow : 4;
- unsigned long disp_cg_ok2switch_en : 1;
- unsigned long sclk_force_reg : 1;
- unsigned long sclk_force_disp : 1;
- unsigned long sclk_force_mc : 1;
- unsigned long sclk_force_extmc : 1;
- unsigned long sclk_force_cp : 1;
- unsigned long sclk_force_e2 : 1;
- unsigned long sclk_force_e3 : 1;
- unsigned long sclk_force_idct : 1;
- unsigned long sclk_force_bist : 1;
- unsigned long busy_extend_cp : 1;
- unsigned long busy_extend_e2 : 1;
- unsigned long busy_extend_e3 : 1;
- unsigned long busy_extend_idct : 1;
- unsigned long : 3;
+ u32 sclk_src_sel : 2;
+ u32 : 2;
+ u32 sclk_post_div_fast : 4;
+ u32 sclk_clkon_hys : 3;
+ u32 sclk_post_div_slow : 4;
+ u32 disp_cg_ok2switch_en : 1;
+ u32 sclk_force_reg : 1;
+ u32 sclk_force_disp : 1;
+ u32 sclk_force_mc : 1;
+ u32 sclk_force_extmc : 1;
+ u32 sclk_force_cp : 1;
+ u32 sclk_force_e2 : 1;
+ u32 sclk_force_e3 : 1;
+ u32 sclk_force_idct : 1;
+ u32 sclk_force_bist : 1;
+ u32 busy_extend_cp : 1;
+ u32 busy_extend_e2 : 1;
+ u32 busy_extend_e3 : 1;
+ u32 busy_extend_idct : 1;
+ u32 : 3;
} __attribute__((packed));
union sclk_cntl_u {
- unsigned long val : 32;
+ u32 val : 32;
struct sclk_cntl_t f;
} __attribute__((packed));
struct pclk_cntl_t {
- unsigned long pclk_src_sel : 2;
- unsigned long : 2;
- unsigned long pclk_post_div : 4;
- unsigned long : 8;
- unsigned long pclk_force_disp : 1;
- unsigned long : 15;
+ u32 pclk_src_sel : 2;
+ u32 : 2;
+ u32 pclk_post_div : 4;
+ u32 : 8;
+ u32 pclk_force_disp : 1;
+ u32 : 15;
} __attribute__((packed));
union pclk_cntl_u {
- unsigned long val : 32;
+ u32 val : 32;
struct pclk_cntl_t f;
} __attribute__((packed));
@@ -735,36 +753,176 @@ union pclk_cntl_u {
#define TESTCLK_SRC_XTAL 0x06
struct clk_test_cntl_t {
- unsigned long testclk_sel : 4;
- unsigned long : 3;
- unsigned long start_check_freq : 1;
- unsigned long tstcount_rst : 1;
- unsigned long : 15;
- unsigned long test_count : 8;
+ u32 testclk_sel : 4;
+ u32 : 3;
+ u32 start_check_freq : 1;
+ u32 tstcount_rst : 1;
+ u32 : 15;
+ u32 test_count : 8;
} __attribute__((packed));
union clk_test_cntl_u {
- unsigned long val : 32;
+ u32 val : 32;
struct clk_test_cntl_t f;
} __attribute__((packed));
struct pwrmgt_cntl_t {
- unsigned long pwm_enable : 1;
- unsigned long : 1;
- unsigned long pwm_mode_req : 2;
- unsigned long pwm_wakeup_cond : 2;
- unsigned long pwm_fast_noml_hw_en : 1;
- unsigned long pwm_noml_fast_hw_en : 1;
- unsigned long pwm_fast_noml_cond : 4;
- unsigned long pwm_noml_fast_cond : 4;
- unsigned long pwm_idle_timer : 8;
- unsigned long pwm_busy_timer : 8;
+ u32 pwm_enable : 1;
+ u32 : 1;
+ u32 pwm_mode_req : 2;
+ u32 pwm_wakeup_cond : 2;
+ u32 pwm_fast_noml_hw_en : 1;
+ u32 pwm_noml_fast_hw_en : 1;
+ u32 pwm_fast_noml_cond : 4;
+ u32 pwm_noml_fast_cond : 4;
+ u32 pwm_idle_timer : 8;
+ u32 pwm_busy_timer : 8;
} __attribute__((packed));
union pwrmgt_cntl_u {
- unsigned long val : 32;
+ u32 val : 32;
struct pwrmgt_cntl_t f;
} __attribute__((packed));
+#define SRC_DATATYPE_EQU_DST 3
+
+#define ROP3_SRCCOPY 0xcc
+#define ROP3_PATCOPY 0xf0
+
+#define GMC_BRUSH_SOLID_COLOR 13
+#define GMC_BRUSH_NONE 15
+
+#define DP_SRC_MEM_RECTANGULAR 2
+
+#define DP_OP_ROP 0
+
+struct dp_gui_master_cntl_t {
+ u32 gmc_src_pitch_offset_cntl : 1;
+ u32 gmc_dst_pitch_offset_cntl : 1;
+ u32 gmc_src_clipping : 1;
+ u32 gmc_dst_clipping : 1;
+ u32 gmc_brush_datatype : 4;
+ u32 gmc_dst_datatype : 4;
+ u32 gmc_src_datatype : 3;
+ u32 gmc_byte_pix_order : 1;
+ u32 gmc_default_sel : 1;
+ u32 gmc_rop3 : 8;
+ u32 gmc_dp_src_source : 3;
+ u32 gmc_clr_cmp_fcn_dis : 1;
+ u32 : 1;
+ u32 gmc_wr_msk_dis : 1;
+ u32 gmc_dp_op : 1;
+} __attribute__((packed));
+
+union dp_gui_master_cntl_u {
+ u32 val : 32;
+ struct dp_gui_master_cntl_t f;
+} __attribute__((packed));
+
+struct rbbm_status_t {
+ u32 cmdfifo_avail : 7;
+ u32 : 1;
+ u32 hirq_on_rbb : 1;
+ u32 cprq_on_rbb : 1;
+ u32 cfrq_on_rbb : 1;
+ u32 hirq_in_rtbuf : 1;
+ u32 cprq_in_rtbuf : 1;
+ u32 cfrq_in_rtbuf : 1;
+ u32 cf_pipe_busy : 1;
+ u32 eng_ev_busy : 1;
+ u32 cp_cmdstrm_busy : 1;
+ u32 e2_busy : 1;
+ u32 rb2d_busy : 1;
+ u32 rb3d_busy : 1;
+ u32 se_busy : 1;
+ u32 re_busy : 1;
+ u32 tam_busy : 1;
+ u32 tdm_busy : 1;
+ u32 pb_busy : 1;
+ u32 : 6;
+ u32 gui_active : 1;
+} __attribute__((packed));
+
+union rbbm_status_u {
+ u32 val : 32;
+ struct rbbm_status_t f;
+} __attribute__((packed));
+
+struct dp_datatype_t {
+ u32 dp_dst_datatype : 4;
+ u32 : 4;
+ u32 dp_brush_datatype : 4;
+ u32 dp_src2_type : 1;
+ u32 dp_src2_datatype : 3;
+ u32 dp_src_datatype : 3;
+ u32 : 11;
+ u32 dp_byte_pix_order : 1;
+ u32 : 1;
+} __attribute__((packed));
+
+union dp_datatype_u {
+ u32 val : 32;
+ struct dp_datatype_t f;
+} __attribute__((packed));
+
+struct dp_mix_t {
+ u32 : 8;
+ u32 dp_src_source : 3;
+ u32 dp_src2_source : 3;
+ u32 : 2;
+ u32 dp_rop3 : 8;
+ u32 dp_op : 1;
+ u32 : 7;
+} __attribute__((packed));
+
+union dp_mix_u {
+ u32 val : 32;
+ struct dp_mix_t f;
+} __attribute__((packed));
+
+struct eng_cntl_t {
+ u32 erc_reg_rd_ws : 1;
+ u32 erc_reg_wr_ws : 1;
+ u32 erc_idle_reg_wr : 1;
+ u32 dis_engine_triggers : 1;
+ u32 dis_rop_src_uses_dst_w_h : 1;
+ u32 dis_src_uses_dst_dirmaj : 1;
+ u32 : 6;
+ u32 force_3dclk_when_2dclk : 1;
+ u32 : 19;
+} __attribute__((packed));
+
+union eng_cntl_u {
+ u32 val : 32;
+ struct eng_cntl_t f;
+} __attribute__((packed));
+
+struct dp_cntl_t {
+ u32 dst_x_dir : 1;
+ u32 dst_y_dir : 1;
+ u32 src_x_dir : 1;
+ u32 src_y_dir : 1;
+ u32 dst_major_x : 1;
+ u32 src_major_x : 1;
+ u32 : 26;
+} __attribute__((packed));
+
+union dp_cntl_u {
+ u32 val : 32;
+ struct dp_cntl_t f;
+} __attribute__((packed));
+
+struct dp_cntl_dst_dir_t {
+ u32 : 15;
+ u32 dst_y_dir : 1;
+ u32 : 15;
+ u32 dst_x_dir : 1;
+} __attribute__((packed));
+
+union dp_cntl_dst_dir_u {
+ u32 val : 32;
+ struct dp_cntl_dst_dir_t f;
+} __attribute__((packed));
+
#endif
diff --git a/fs/Makefile b/fs/Makefile
index f3a4f70771754..83bf478e786b3 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -10,7 +10,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \
ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \
attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \
seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \
- ioprio.o pnode.o drop_caches.o splice.o
+ ioprio.o pnode.o drop_caches.o splice.o sync.o
obj-$(CONFIG_INOTIFY) += inotify.o
obj-$(CONFIG_EPOLL) += eventpoll.o
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 4e1b849f912f7..f3418f7a6e9d9 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
+#include <linux/seq_file.h>
#include <linux/kobject.h>
#include <linux/kobj_map.h>
@@ -27,8 +28,6 @@
static struct kobj_map *cdev_map;
-#define MAX_PROBE_HASH 255 /* random */
-
static DEFINE_MUTEX(chrdevs_lock);
static struct char_device_struct {
@@ -39,93 +38,29 @@ static struct char_device_struct {
char name[64];
struct file_operations *fops;
struct cdev *cdev; /* will die */
-} *chrdevs[MAX_PROBE_HASH];
+} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
/* index in the above */
static inline int major_to_index(int major)
{
- return major % MAX_PROBE_HASH;
-}
-
-struct chrdev_info {
- int index;
- struct char_device_struct *cd;
-};
-
-void *get_next_chrdev(void *dev)
-{
- struct chrdev_info *info;
-
- if (dev == NULL) {
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto out;
- info->index=0;
- info->cd = chrdevs[info->index];
- if (info->cd)
- goto out;
- } else {
- info = dev;
- }
-
- while (info->index < ARRAY_SIZE(chrdevs)) {
- if (info->cd)
- info->cd = info->cd->next;
- if (info->cd)
- goto out;
- /*
- * No devices on this chain, move to the next
- */
- info->index++;
- info->cd = (info->index < ARRAY_SIZE(chrdevs)) ?
- chrdevs[info->index] : NULL;
- if (info->cd)
- goto out;
- }
-
-out:
- return info;
-}
-
-void *acquire_chrdev_list(void)
-{
- mutex_lock(&chrdevs_lock);
- return get_next_chrdev(NULL);
-}
-
-void release_chrdev_list(void *dev)
-{
- mutex_unlock(&chrdevs_lock);
- kfree(dev);
+ return major % CHRDEV_MAJOR_HASH_SIZE;
}
+#ifdef CONFIG_PROC_FS
-int count_chrdev_list(void)
+void chrdev_show(struct seq_file *f, off_t offset)
{
struct char_device_struct *cd;
- int i, count;
-
- count = 0;
- for (i = 0; i < ARRAY_SIZE(chrdevs) ; i++) {
- for (cd = chrdevs[i]; cd; cd = cd->next)
- count++;
+ if (offset < CHRDEV_MAJOR_HASH_SIZE) {
+ mutex_lock(&chrdevs_lock);
+ for (cd = chrdevs[offset]; cd; cd = cd->next)
+ seq_printf(f, "%3d %s\n", cd->major, cd->name);
+ mutex_unlock(&chrdevs_lock);
}
-
- return count;
}
-int get_chrdev_info(void *dev, int *major, char **name)
-{
- struct chrdev_info *info = dev;
-
- if (info->cd == NULL)
- return 1;
-
- *major = info->cd->major;
- *name = info->cd->name;
- return 0;
-}
+#endif /* CONFIG_PROC_FS */
/*
* Register a single major with a specified minor range.
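The char_dev.c hunk above replaces the old acquire_chrdev_list()/get_next_chrdev()/get_chrdev_info() iterator trio with a single chrdev_show() helper that prints one major-number hash bucket per call. A minimal sketch of how a seq_file consumer might drive it, assuming a hypothetical devinfo_show() callback and an assumed declaration location; only chrdev_show(), seq_printf() and CHRDEV_MAJOR_HASH_SIZE come from the patch itself:

#include <linux/fs.h>		/* chrdev_show() declaration assumed to live here */
#include <linux/seq_file.h>

/* Hypothetical ->show() callback: the seq_file iterator is assumed to hand
 * over one hash bucket index (0 .. CHRDEV_MAJOR_HASH_SIZE-1) per step.
 */
static int devinfo_show(struct seq_file *f, void *v)
{
	loff_t bucket = *(loff_t *)v;

	if (bucket == 0)
		seq_printf(f, "Character devices:\n");
	chrdev_show(f, bucket);	/* prints "major name" for each entry in this bucket */
	return 0;
}

The point of the new interface is that locking and list traversal stay inside char_dev.c; a consumer only supplies a bucket index and a seq_file to print into.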
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index cb68efba35dbb..8a2de038882e3 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,21 @@
+Version 1.42
+------------
+Fix slow oplock break when mounted to different servers at the same time,
+the tids match, and we try to find a matching fid on the wrong server.
+
+Version 1.41
+------------
+Fix NTLMv2 security (can be enabled in /proc/fs/cifs) so customers can
+configure stronger authentication. Fix sfu symlinks so they can
+be followed (not just recognized). Fix wraparound of bcc on
+read responses when buffer size over 64K and also fix wrap of
+max smb buffer size when CIFSMaxBufSize over 64K. Fix oops in
+cifs_user_read and cifs_readpages (when EAGAIN on send of smb
+on socket is returned over and over). Add POSIX (advisory) byte range
+locking support (requires a server implementing the newest CIFS UNIX
+Extensions to the protocol). Slow down negprot slightly in the port 139
+RFC1001 case to give session_init time on buggy servers.
+
Version 1.40
------------
Use fsuid (fsgid) more consistently instead of uid (gid). Improve performance
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 7384947a0f932..58c77254a23b3 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -3,4 +3,4 @@
#
obj-$(CONFIG_CIFS) += cifs.o
-cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o
+cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o ntlmssp.o
diff --git a/fs/cifs/README b/fs/cifs/README
index b0070d1b149d6..b2b4d0803761b 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -422,6 +422,13 @@ A partial list of the supported mount options follows:
nomapchars Do not translate any of these seven characters (default).
nocase Request case insensitive path name matching (case
sensitive is the default if the server supports it).
+ posixpaths If CIFS Unix extensions are supported, attempt to
+ negotiate posix path name support which allows certain
+ characters forbidden in typical CIFS filenames, without
+ requiring remapping. (default)
+ noposixpaths If CIFS Unix extensions are supported, do not request
+ posix path name support (this may cause servers to
+ reject creating files with certain reserved characters).
nobrl Do not send byte range lock requests to the server.
This is necessary for certain applications that break
with cifs style mandatory byte range locks (and most
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index a2c24858d40f9..e7d63737e6511 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifsencrypt.c
*
- * Copyright (C) International Business Machines Corp., 2005
+ * Copyright (C) International Business Machines Corp., 2005,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -36,7 +36,8 @@
extern void mdfour(unsigned char *out, unsigned char *in, int n);
extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
-static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu, const char * key, char * signature)
+static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu,
+ const char * key, char * signature)
{
struct MD5Context context;
@@ -56,9 +57,6 @@ int cifs_sign_smb(struct smb_hdr * cifs_pdu, struct TCP_Server_Info * server,
int rc = 0;
char smb_signature[20];
- /* BB remember to initialize sequence number elsewhere and initialize mac_signing key elsewhere BB */
- /* BB remember to add code to save expected sequence number in midQ entry BB */
-
if((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
@@ -85,20 +83,33 @@ int cifs_sign_smb(struct smb_hdr * cifs_pdu, struct TCP_Server_Info * server,
static int cifs_calc_signature2(const struct kvec * iov, int n_vec,
const char * key, char * signature)
{
- struct MD5Context context;
-
- if((iov == NULL) || (signature == NULL))
- return -EINVAL;
+ struct MD5Context context;
+ int i;
- MD5Init(&context);
- MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
+ if((iov == NULL) || (signature == NULL))
+ return -EINVAL;
-/* MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length); */ /* BB FIXME BB */
+ MD5Init(&context);
+ MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
+ for(i=0;i<n_vec;i++) {
+ if(iov[i].iov_base == NULL) {
+ cERROR(1,("null iovec entry"));
+ return -EIO;
+ } else if(iov[i].iov_len == 0)
+ break; /* bail out if we are sent nothing to sign */
+ /* The first entry includes a length field (which does not get
+		   signed) that occupies the first 4 bytes before the header */
+ if(i==0) {
+ if (iov[0].iov_len <= 8 ) /* cmd field at offset 9 */
+ break; /* nothing to sign or corrupt header */
+ MD5Update(&context,iov[0].iov_base+4, iov[0].iov_len-4);
+ } else
+ MD5Update(&context,iov[i].iov_base, iov[i].iov_len);
+ }
- MD5Final(signature,&context);
+ MD5Final(signature,&context);
- return -EOPNOTSUPP;
-/* return 0; */
+ return 0;
}
@@ -259,4 +270,5 @@ void CalcNTLMv2_response(const struct cifsSesInfo * ses,char * v2_session_respon
/* hmac_md5_update(v2_session_response+16)client thing,8,&context); */ /* BB fix */
hmac_md5_final(v2_session_response,&context);
+ cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); /* BB removeme BB */
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 4bbc544857bcc..d4b713e5affbd 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -93,13 +93,10 @@ cifs_read_super(struct super_block *sb, void *data,
int rc = 0;
sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
- sb->s_fs_info = kmalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
+ sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
cifs_sb = CIFS_SB(sb);
if(cifs_sb == NULL)
return -ENOMEM;
- else
- memset(cifs_sb,0,sizeof(struct cifs_sb_info));
-
rc = cifs_mount(sb, cifs_sb, data, devname);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 74f405ae4da34..4e829dc672a64 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -99,5 +99,5 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
extern int cifs_ioctl (struct inode * inode, struct file * filep,
unsigned int command, unsigned long arg);
-#define CIFS_VERSION "1.40"
+#define CIFS_VERSION "1.42"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 7bed27601ce59..006eb33bff5ff 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifsglob.h
*
- * Copyright (C) International Business Machines Corp., 2002,2005
+ * Copyright (C) International Business Machines Corp., 2002,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -430,6 +430,15 @@ struct dir_notify_req {
#define CIFS_LARGE_BUFFER 2
#define CIFS_IOVEC 4 /* array of response buffers */
+/* Type of session setup needed */
+#define CIFS_PLAINTEXT 0
+#define CIFS_LANMAN 1
+#define CIFS_NTLM 2
+#define CIFS_NTLMSSP_NEG 3
+#define CIFS_NTLMSSP_AUTH 4
+#define CIFS_SPNEGO_INIT 5
+#define CIFS_SPNEGO_TARG 6
+
/*
*****************************************************************
* All constants go here
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index cc2471094ca58..b2233ac05bd27 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -859,7 +859,10 @@ typedef struct smb_com_lock_req {
LOCKING_ANDX_RANGE Locks[1];
} __attribute__((packed)) LOCK_REQ;
-
+/* lock type */
+#define CIFS_RDLCK 0
+#define CIFS_WRLCK 1
+#define CIFS_UNLCK 2
typedef struct cifs_posix_lock {
__le16 lock_type; /* 0 = Read, 1 = Write, 2 = Unlock */
__le16 lock_flags; /* 1 = Wait (only valid for setlock) */
@@ -1786,7 +1789,13 @@ typedef struct {
#define CIFS_UNIX_POSIX_ACL_CAP 0x00000002 /* support getfacl/setfacl */
#define CIFS_UNIX_XATTR_CAP 0x00000004 /* support new namespace */
#define CIFS_UNIX_EXTATTR_CAP 0x00000008 /* support chattr/chflag */
-#define CIFS_UNIX_POSIX_PATHNAMES_CAP 0x00000010 /* Use POSIX pathnames on the wire. */
+#define CIFS_UNIX_POSIX_PATHNAMES_CAP 0x00000010 /* Allow POSIX path chars */
+#ifdef CONFIG_CIFS_POSIX
+#define CIFS_UNIX_CAP_MASK 0x0000001b
+#else
+#define CIFS_UNIX_CAP_MASK 0x00000013
+#endif /* CONFIG_CIFS_POSIX */
+
#define CIFS_POSIX_EXTENSIONS 0x00000010 /* support for new QFSInfo */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 7b25463d3c14b..2879ba343ca7a 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifsproto.h
*
- * Copyright (c) International Business Machines Corp., 2002,2005
+ * Copyright (c) International Business Machines Corp., 2002,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -64,6 +64,14 @@ extern int map_smb_to_linux_error(struct smb_hdr *smb);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifsTconInfo *, int /* length of
fixed section (word count) in two byte units */);
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
+ struct cifsSesInfo *ses,
+ void ** request_buf);
+extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
+ const int stage, int * pNTLMv2_flg,
+ const struct nls_table *nls_cp);
+#endif
extern __u16 GetNextMid(struct TCP_Server_Info *server);
extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16,
struct cifsTconInfo *);
@@ -257,7 +265,10 @@ extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
const int waitFlag);
-
+extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
+ const __u16 smb_file_id, const int get_flag,
+ const __u64 len, const __u64 offset,
+ const __u16 lock_type, const int waitFlag);
extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index a243fe2792d5e..d705500aa283a 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifssmb.c
*
- * Copyright (C) International Business Machines Corp., 2002,2005
+ * Copyright (C) International Business Machines Corp., 2002,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Contains the routines for constructing the SMB PDUs themselves
@@ -186,7 +186,35 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
cifs_stats_inc(&tcon->num_smbs_sent);
return rc;
-}
+}
+
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+int
+small_smb_init_no_tc(const int smb_command, const int wct,
+ struct cifsSesInfo *ses, void **request_buf)
+{
+ int rc;
+ struct smb_hdr * buffer;
+
+ rc = small_smb_init(smb_command, wct, NULL, request_buf);
+ if(rc)
+ return rc;
+
+ buffer = (struct smb_hdr *)*request_buf;
+ buffer->Mid = GetNextMid(ses->server);
+ if (ses->capabilities & CAP_UNICODE)
+ buffer->Flags2 |= SMBFLG2_UNICODE;
+ if (ses->capabilities & CAP_STATUS32)
+ buffer->Flags2 |= SMBFLG2_ERR_STATUS;
+
+ /* uid, tid can stay at zero as set in header assemble */
+
+ /* BB add support for turning on the signing when
+ this function is used after 1st of session setup requests */
+
+ return rc;
+}
+#endif /* CONFIG_CIFS_EXPERIMENTAL */
/* If the return code is zero, this function must fill in request_buf pointer */
static int
@@ -1042,7 +1070,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
}
}
- cifs_small_buf_release(pSMB);
+/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
if(*buf) {
if(resp_buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(iov[0].iov_base);
@@ -1246,7 +1274,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
*nbytes += le16_to_cpu(pSMBr->Count);
}
- cifs_small_buf_release(pSMB);
+/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
if(resp_buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(iov[0].iov_base);
else if(resp_buf_type == CIFS_LARGE_BUFFER)
@@ -1325,6 +1353,85 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
}
int
+CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
+ const __u16 smb_file_id, const int get_flag, const __u64 len,
+ const __u64 lkoffset, const __u16 lock_type, const int waitFlag)
+{
+ struct smb_com_transaction2_sfi_req *pSMB = NULL;
+ struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
+ char *data_offset;
+ struct cifs_posix_lock *parm_data;
+ int rc = 0;
+ int bytes_returned = 0;
+ __u16 params, param_offset, offset, byte_count, count;
+
+ cFYI(1, ("Posix Lock"));
+ rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
+
+ if (rc)
+ return rc;
+
+ pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
+
+ params = 6;
+ pSMB->MaxSetupCount = 0;
+ pSMB->Reserved = 0;
+ pSMB->Flags = 0;
+ pSMB->Timeout = 0;
+ pSMB->Reserved2 = 0;
+ param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+ offset = param_offset + params;
+
+ data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+
+ count = sizeof(struct cifs_posix_lock);
+ pSMB->MaxParameterCount = cpu_to_le16(2);
+ pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
+ pSMB->SetupCount = 1;
+ pSMB->Reserved3 = 0;
+ if(get_flag)
+ pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
+ else
+ pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+ byte_count = 3 /* pad */ + params + count;
+ pSMB->DataCount = cpu_to_le16(count);
+ pSMB->ParameterCount = cpu_to_le16(params);
+ pSMB->TotalDataCount = pSMB->DataCount;
+ pSMB->TotalParameterCount = pSMB->ParameterCount;
+ pSMB->ParameterOffset = cpu_to_le16(param_offset);
+ parm_data = (struct cifs_posix_lock *)
+ (((char *) &pSMB->hdr.Protocol) + offset);
+
+ parm_data->lock_type = cpu_to_le16(lock_type);
+ if(waitFlag)
+ parm_data->lock_flags = 1;
+ parm_data->pid = cpu_to_le32(current->tgid);
+ parm_data->start = lkoffset;
+ parm_data->length = len; /* normalize negative numbers */
+
+ pSMB->DataOffset = cpu_to_le16(offset);
+ pSMB->Fid = smb_file_id;
+ pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_LOCK);
+ pSMB->Reserved4 = 0;
+ pSMB->hdr.smb_buf_length += byte_count;
+ pSMB->ByteCount = cpu_to_le16(byte_count);
+ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+ (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+ if (rc) {
+ cFYI(1, ("Send error in Posix Lock = %d", rc));
+ }
+
+ if (pSMB)
+ cifs_small_buf_release(pSMB);
+
+ /* Note: On -EAGAIN error only caller can retry on handle based calls
+ since file handle passed in no longer valid */
+
+ return rc;
+}
+
+
+int
CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
{
int rc = 0;
@@ -2578,7 +2685,7 @@ qsec_out:
cifs_small_buf_release(iov[0].iov_base);
else if(buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
- cifs_small_buf_release(pSMB);
+/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
return rc;
}
@@ -2954,7 +3061,8 @@ findFirstRetry:
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->ParameterOffset = cpu_to_le16(
- offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes) - 4);
+ offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes)
+ - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1; /* one byte, no need to make endian neutral */
@@ -2977,12 +3085,12 @@ findFirstRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_ffirst);
- if (rc) {/* BB add logic to retry regular search if Unix search rejected unexpectedly by server */
+ if (rc) {/* BB add logic to retry regular search if Unix search
+ rejected unexpectedly by server */
/* BB Add code to handle unsupported level rc */
cFYI(1, ("Error in FindFirst = %d", rc));
- if (pSMB)
- cifs_buf_release(pSMB);
+ cifs_buf_release(pSMB);
/* BB eventually could optimize out free and realloc of buf */
/* for this case */
@@ -2998,6 +3106,7 @@ findFirstRetry:
psrch_inf->unicode = FALSE;
psrch_inf->ntwrk_buf_start = (char *)pSMBr;
+ psrch_inf->smallBuf = 0;
psrch_inf->srch_entries_start =
(char *) &pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.DataOffset);
@@ -3118,9 +3227,14 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
parms = (T2_FNEXT_RSP_PARMS *)response_data;
response_data = (char *)&pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.DataOffset);
- cifs_buf_release(psrch_inf->ntwrk_buf_start);
+ if(psrch_inf->smallBuf)
+ cifs_small_buf_release(
+ psrch_inf->ntwrk_buf_start);
+ else
+ cifs_buf_release(psrch_inf->ntwrk_buf_start);
psrch_inf->srch_entries_start = response_data;
psrch_inf->ntwrk_buf_start = (char *)pSMB;
+ psrch_inf->smallBuf = 0;
if(parms->EndofSearch)
psrch_inf->endOfSearch = TRUE;
else
@@ -3834,6 +3948,7 @@ CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap)
cFYI(1, ("In SETFSUnixInfo"));
SETFSUnixRetry:
+ /* BB switch to small buf init to save memory */
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2a0c1f4ca0ae9..0b86d5ca90143 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/connect.c
*
- * Copyright (C) International Business Machines Corp., 2002,2005
+ * Copyright (C) International Business Machines Corp., 2002,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -564,7 +564,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
dump_smb(smb_buffer, length);
- if (checkSMB (smb_buffer, smb_buffer->Mid, total_read+4)) {
+ if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
cifs_dump_mem("Bad SMB: ", smb_buffer, 48);
continue;
}
@@ -1476,6 +1476,14 @@ ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
rc = smb_send(*csocket, smb_buf, 0x44,
(struct sockaddr *)psin_server);
kfree(ses_init_buf);
+ msleep(1); /* RFC1001 layer in at least one server
+ requires very short break before negprot
+ presumably because not expecting negprot
+ to follow so fast. This is a simple
+ solution that works without
+ complicating the code and causes no
+ significant slowing down on mount
+ for everyone else */
}
/* else the negprot may still work without this
even though malloc failed */
@@ -1920,27 +1928,34 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
cifs_sb->tcon = tcon;
tcon->ses = pSesInfo;
- /* do not care if following two calls succeed - informational only */
+ /* do not care if following two calls succeed - informational */
CIFSSMBQFSDeviceInfo(xid, tcon);
CIFSSMBQFSAttributeInfo(xid, tcon);
+
if (tcon->ses->capabilities & CAP_UNIX) {
if(!CIFSSMBQFSUnixInfo(xid, tcon)) {
- if(!volume_info.no_psx_acl) {
- if(CIFS_UNIX_POSIX_ACL_CAP &
- le64_to_cpu(tcon->fsUnixInfo.Capability))
- cFYI(1,("server negotiated posix acl support"));
- sb->s_flags |= MS_POSIXACL;
+ __u64 cap =
+ le64_to_cpu(tcon->fsUnixInfo.Capability);
+ cap &= CIFS_UNIX_CAP_MASK;
+ if(volume_info.no_psx_acl)
+ cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
+ else if(CIFS_UNIX_POSIX_ACL_CAP & cap) {
+ cFYI(1,("negotiated posix acl support"));
+ sb->s_flags |= MS_POSIXACL;
}
- /* Try and negotiate POSIX pathnames if we can. */
- if (volume_info.posix_paths && (CIFS_UNIX_POSIX_PATHNAMES_CAP &
- le64_to_cpu(tcon->fsUnixInfo.Capability))) {
- if (!CIFSSMBSetFSUnixInfo(xid, tcon, CIFS_UNIX_POSIX_PATHNAMES_CAP)) {
- cFYI(1,("negotiated posix pathnames support"));
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
- } else {
- cFYI(1,("posix pathnames support requested but not supported"));
- }
+ if(volume_info.posix_paths == 0)
+ cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
+ else if(cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
+ cFYI(1,("negotiate posix pathnames"));
+ cifs_sb->mnt_cifs_flags |=
+ CIFS_MOUNT_POSIX_PATHS;
+ }
+
+ cFYI(1,("Negotiate caps 0x%x",(int)cap));
+
+ if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
+ cFYI(1,("setting capabilities failed"));
}
}
}
@@ -2278,6 +2293,8 @@ CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
smb_buffer->Mid = GetNextMid(ses->server);
pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
pSMB->req.AndXCommand = 0xFF;
+ if(ses->server->maxBuf > 64*1024)
+ ses->server->maxBuf = (64*1023);
pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
@@ -2525,7 +2542,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
__u32 negotiate_flags, capabilities;
__u16 count;
- cFYI(1, ("In NTLMSSP sesssetup (negotiate) "));
+ cFYI(1, ("In NTLMSSP sesssetup (negotiate)"));
if(ses == NULL)
return -EINVAL;
domain = ses->domainName;
@@ -2575,7 +2592,8 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
SecurityBlob->MessageType = NtLmNegotiate;
negotiate_flags =
NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_OEM |
- NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_NTLM | 0x80000000 |
+ NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_NTLM |
+ NTLMSSP_NEGOTIATE_56 |
/* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128;
if(sign_CIFS_PDUs)
negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN;
@@ -2588,26 +2606,11 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
SecurityBlob->WorkstationName.Length = 0;
SecurityBlob->WorkstationName.MaximumLength = 0;
- if (domain == NULL) {
- SecurityBlob->DomainName.Buffer = 0;
- SecurityBlob->DomainName.Length = 0;
- SecurityBlob->DomainName.MaximumLength = 0;
- } else {
- __u16 len;
- negotiate_flags |= NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED;
- strncpy(bcc_ptr, domain, 63);
- len = strnlen(domain, 64);
- SecurityBlob->DomainName.MaximumLength =
- cpu_to_le16(len);
- SecurityBlob->DomainName.Buffer =
- cpu_to_le32((long) &SecurityBlob->
- DomainString -
- (long) &SecurityBlob->Signature);
- bcc_ptr += len;
- SecurityBlobLength += len;
- SecurityBlob->DomainName.Length =
- cpu_to_le16(len);
- }
+ /* Domain not sent on first Sesssetup in NTLMSSP, instead it is sent
+ along with username on auth request (ie the response to challenge) */
+ SecurityBlob->DomainName.Buffer = 0;
+ SecurityBlob->DomainName.Length = 0;
+ SecurityBlob->DomainName.MaximumLength = 0;
if (ses->capabilities & CAP_UNICODE) {
if ((long) bcc_ptr % 2) {
*bcc_ptr = 0;
@@ -2677,7 +2680,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
SecurityBlob2->MessageType));
} else if (ses) {
ses->Suid = smb_buffer_response->Uid; /* UID left in le format */
- cFYI(1, ("UID = %d ", ses->Suid));
+ cFYI(1, ("UID = %d", ses->Suid));
if ((pSMBr->resp.hdr.WordCount == 3)
|| ((pSMBr->resp.hdr.WordCount == 4)
&& (blob_len <
@@ -2685,17 +2688,17 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
if (pSMBr->resp.hdr.WordCount == 4) {
bcc_ptr += blob_len;
- cFYI(1,
- ("Security Blob Length %d ",
+ cFYI(1, ("Security Blob Length %d",
blob_len));
}
- cFYI(1, ("NTLMSSP Challenge rcvd "));
+ cFYI(1, ("NTLMSSP Challenge rcvd"));
memcpy(ses->server->cryptKey,
SecurityBlob2->Challenge,
CIFS_CRYPTO_KEY_SIZE);
- if(SecurityBlob2->NegotiateFlags & cpu_to_le32(NTLMSSP_NEGOTIATE_NTLMV2))
+ if(SecurityBlob2->NegotiateFlags &
+ cpu_to_le32(NTLMSSP_NEGOTIATE_NTLMV2))
*pNTLMv2_flag = TRUE;
if((SecurityBlob2->NegotiateFlags &
@@ -2818,7 +2821,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
bcc_ptr++;
} else
cFYI(1,
- ("Variable field of length %d extends beyond end of smb ",
+ ("Variable field of length %d extends beyond end of smb",
len));
}
} else {
@@ -2830,7 +2833,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
}
} else {
cERROR(1,
- (" Invalid Word count %d: ",
+ (" Invalid Word count %d:",
smb_buffer_response->WordCount));
rc = -EIO;
}
@@ -3447,7 +3450,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
if (extended_security
&& (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
&& (pSesInfo->server->secType == NTLMSSP)) {
- cFYI(1, ("New style sesssetup "));
+ cFYI(1, ("New style sesssetup"));
rc = CIFSSpnegoSessSetup(xid, pSesInfo,
NULL /* security blob */,
0 /* blob length */,
@@ -3455,7 +3458,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
} else if (extended_security
&& (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
&& (pSesInfo->server->secType == RawNTLMSSP)) {
- cFYI(1, ("NTLMSSP sesssetup "));
+ cFYI(1, ("NTLMSSP sesssetup"));
rc = CIFSNTLMSSPNegotiateSessSetup(xid,
pSesInfo,
&ntlmv2_flag,
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 632561dd9c500..1d0ca3eaaca51 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -48,13 +48,14 @@ build_path_from_dentry(struct dentry *direntry)
struct dentry *temp;
int namelen = 0;
char *full_path;
- char dirsep = CIFS_DIR_SEP(CIFS_SB(direntry->d_sb));
+ char dirsep;
if(direntry == NULL)
return NULL; /* not much we can do if dentry is freed and
we need to reopen the file after it was closed implicitly
when the server crashed */
+ dirsep = CIFS_DIR_SEP(CIFS_SB(direntry->d_sb));
cifs_bp_rename_retry:
for (temp = direntry; !IS_ROOT(temp);) {
namelen += (1 + temp->d_name.len);
@@ -255,12 +256,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
CIFSSMBClose(xid, pTcon, fileHandle);
} else if(newinode) {
pCifsFile =
- kmalloc(sizeof (struct cifsFileInfo), GFP_KERNEL);
+ kzalloc(sizeof (struct cifsFileInfo), GFP_KERNEL);
if(pCifsFile == NULL)
goto cifs_create_out;
- memset((char *)pCifsFile, 0,
- sizeof (struct cifsFileInfo));
pCifsFile->netfid = fileHandle;
pCifsFile->pid = current->tgid;
pCifsFile->pInode = newinode;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fb49aef1f2ecd..5c497c529772a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -555,7 +555,10 @@ int cifs_closedir(struct inode *inode, struct file *file)
if (ptmp) {
cFYI(1, ("closedir free smb buf in srch struct"));
pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
- cifs_buf_release(ptmp);
+ if(pCFileStruct->srch_inf.smallBuf)
+ cifs_small_buf_release(ptmp);
+ else
+ cifs_buf_release(ptmp);
}
ptmp = pCFileStruct->search_resume_name;
if (ptmp) {
@@ -574,13 +577,14 @@ int cifs_closedir(struct inode *inode, struct file *file)
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
int rc, xid;
- __u32 lockType = LOCKING_ANDX_LARGE_FILES;
__u32 numLock = 0;
__u32 numUnlock = 0;
__u64 length;
int wait_flag = FALSE;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
+ __u16 netfid;
+ __u8 lockType = LOCKING_ANDX_LARGE_FILES;
length = 1 + pfLock->fl_end - pfLock->fl_start;
rc = -EACCES;
@@ -592,11 +596,11 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
pfLock->fl_end));
if (pfLock->fl_flags & FL_POSIX)
- cFYI(1, ("Posix "));
+ cFYI(1, ("Posix"));
if (pfLock->fl_flags & FL_FLOCK)
- cFYI(1, ("Flock "));
+ cFYI(1, ("Flock"));
if (pfLock->fl_flags & FL_SLEEP) {
- cFYI(1, ("Blocking lock "));
+ cFYI(1, ("Blocking lock"));
wait_flag = TRUE;
}
if (pfLock->fl_flags & FL_ACCESS)
@@ -612,21 +616,23 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
cFYI(1, ("F_WRLCK "));
numLock = 1;
} else if (pfLock->fl_type == F_UNLCK) {
- cFYI(1, ("F_UNLCK "));
+ cFYI(1, ("F_UNLCK"));
numUnlock = 1;
+ /* Check if unlock includes more than
+ one lock range */
} else if (pfLock->fl_type == F_RDLCK) {
- cFYI(1, ("F_RDLCK "));
+ cFYI(1, ("F_RDLCK"));
lockType |= LOCKING_ANDX_SHARED_LOCK;
numLock = 1;
} else if (pfLock->fl_type == F_EXLCK) {
- cFYI(1, ("F_EXLCK "));
+ cFYI(1, ("F_EXLCK"));
numLock = 1;
} else if (pfLock->fl_type == F_SHLCK) {
- cFYI(1, ("F_SHLCK "));
+ cFYI(1, ("F_SHLCK"));
lockType |= LOCKING_ANDX_SHARED_LOCK;
numLock = 1;
} else
- cFYI(1, ("Unknown type of lock "));
+ cFYI(1, ("Unknown type of lock"));
cifs_sb = CIFS_SB(file->f_dentry->d_sb);
pTcon = cifs_sb->tcon;
@@ -635,27 +641,41 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
FreeXid(xid);
return -EBADF;
}
+ netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
+
+ /* BB add code here to normalize offset and length to
+ account for negative length which we can not accept over the
+ wire */
if (IS_GETLK(cmd)) {
- rc = CIFSSMBLock(xid, pTcon,
- ((struct cifsFileInfo *)file->
- private_data)->netfid,
- length,
- pfLock->fl_start, 0, 1, lockType,
- 0 /* wait flag */ );
+ if(experimEnabled &&
+ (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
+ (CIFS_UNIX_FCNTL_CAP &
+ le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
+ int posix_lock_type;
+ if(lockType & LOCKING_ANDX_SHARED_LOCK)
+ posix_lock_type = CIFS_RDLCK;
+ else
+ posix_lock_type = CIFS_WRLCK;
+ rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
+ length, pfLock->fl_start,
+ posix_lock_type, wait_flag);
+ FreeXid(xid);
+ return rc;
+ }
+
+ /* BB we could chain these into one lock request BB */
+ rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
+ 0, 1, lockType, 0 /* wait flag */ );
if (rc == 0) {
- rc = CIFSSMBLock(xid, pTcon,
- ((struct cifsFileInfo *) file->
- private_data)->netfid,
- length,
+ rc = CIFSSMBLock(xid, pTcon, netfid, length,
pfLock->fl_start, 1 /* numUnlock */ ,
0 /* numLock */ , lockType,
0 /* wait flag */ );
pfLock->fl_type = F_UNLCK;
if (rc != 0)
cERROR(1, ("Error unlocking previously locked "
- "range %d during test of lock ",
- rc));
+ "range %d during test of lock", rc));
rc = 0;
} else {
@@ -667,12 +687,30 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
FreeXid(xid);
return rc;
}
-
- rc = CIFSSMBLock(xid, pTcon,
- ((struct cifsFileInfo *) file->private_data)->
- netfid, length,
- pfLock->fl_start, numUnlock, numLock, lockType,
- wait_flag);
+ if (experimEnabled &&
+ (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
+ (CIFS_UNIX_FCNTL_CAP &
+ le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
+ int posix_lock_type;
+ if(lockType & LOCKING_ANDX_SHARED_LOCK)
+ posix_lock_type = CIFS_RDLCK;
+ else
+ posix_lock_type = CIFS_WRLCK;
+
+ if(numUnlock == 1)
+ posix_lock_type = CIFS_UNLCK;
+ else if(numLock == 0) {
+ /* if no lock or unlock then nothing
+ to do since we do not know what it is */
+ FreeXid(xid);
+ return -EOPNOTSUPP;
+ }
+ rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
+ length, pfLock->fl_start,
+ posix_lock_type, wait_flag);
+ } else
+ rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
+ numUnlock, numLock, lockType, wait_flag);
if (pfLock->fl_flags & FL_POSIX)
posix_lock_file_wait(file, pfLock);
FreeXid(xid);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 598eec9778f67..957ddd1571c6f 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -565,11 +565,14 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
struct cifsInodeInfo *cifsInode;
FILE_BASIC_INFO *pinfo_buf;
- cFYI(1, ("cifs_unlink, inode = 0x%p with ", inode));
+ cFYI(1, ("cifs_unlink, inode = 0x%p", inode));
xid = GetXid();
- cifs_sb = CIFS_SB(inode->i_sb);
+ if(inode)
+ cifs_sb = CIFS_SB(inode->i_sb);
+ else
+ cifs_sb = CIFS_SB(direntry->d_sb);
pTcon = cifs_sb->tcon;
/* Unlink can be called from rename so we can not grab the sem here
@@ -609,9 +612,8 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
}
} else if (rc == -EACCES) {
/* try only if r/o attribute set in local lookup data? */
- pinfo_buf = kmalloc(sizeof(FILE_BASIC_INFO), GFP_KERNEL);
+ pinfo_buf = kzalloc(sizeof(FILE_BASIC_INFO), GFP_KERNEL);
if (pinfo_buf) {
- memset(pinfo_buf, 0, sizeof(FILE_BASIC_INFO));
/* ATTRS set to normal clears r/o bit */
pinfo_buf->Attributes = cpu_to_le32(ATTR_NORMAL);
if (!(pTcon->ses->flags & CIFS_SES_NT4))
@@ -693,9 +695,11 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
when needed */
direntry->d_inode->i_ctime = current_fs_time(inode->i_sb);
}
- inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
- cifsInode = CIFS_I(inode);
- cifsInode->time = 0; /* force revalidate of dir as well */
+ if(inode) {
+ inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
+ cifsInode = CIFS_I(inode);
+ cifsInode->time = 0; /* force revalidate of dir as well */
+ }
kfree(full_path);
FreeXid(xid);
@@ -1167,7 +1171,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
nfid, npid, FALSE);
atomic_dec(&open_file->wrtPending);
cFYI(1,("SetFSize for attrs rc = %d", rc));
- if(rc == -EINVAL) {
+ if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
int bytes_written;
rc = CIFSSMBWrite(xid, pTcon,
nfid, 0, attrs->ia_size,
@@ -1189,7 +1193,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
cFYI(1, ("SetEOF by path (setattrs) rc = %d", rc));
- if(rc == -EINVAL) {
+ if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
__u16 netfid;
int oplock = FALSE;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 8d0da7c87c7b6..9562f5bba65c1 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -67,7 +67,7 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
cifs_sb_target->local_nls,
cifs_sb_target->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if(rc == -EIO)
+ if((rc == -EIO) || (rc == -EINVAL))
rc = -EOPNOTSUPP;
}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 432ba15e2c2dc..fafd056426e4b 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -72,10 +72,9 @@ sesInfoAlloc(void)
struct cifsSesInfo *ret_buf;
ret_buf =
- (struct cifsSesInfo *) kmalloc(sizeof (struct cifsSesInfo),
+ (struct cifsSesInfo *) kzalloc(sizeof (struct cifsSesInfo),
GFP_KERNEL);
if (ret_buf) {
- memset(ret_buf, 0, sizeof (struct cifsSesInfo));
write_lock(&GlobalSMBSeslock);
atomic_inc(&sesInfoAllocCount);
ret_buf->status = CifsNew;
@@ -110,10 +109,9 @@ tconInfoAlloc(void)
{
struct cifsTconInfo *ret_buf;
ret_buf =
- (struct cifsTconInfo *) kmalloc(sizeof (struct cifsTconInfo),
+ (struct cifsTconInfo *) kzalloc(sizeof (struct cifsTconInfo),
GFP_KERNEL);
if (ret_buf) {
- memset(ret_buf, 0, sizeof (struct cifsTconInfo));
write_lock(&GlobalSMBSeslock);
atomic_inc(&tconInfoAllocCount);
list_add(&ret_buf->cifsConnectionList,
@@ -423,9 +421,7 @@ checkSMB(struct smb_hdr *smb, __u16 mid, int length)
{
__u32 len = smb->smb_buf_length;
__u32 clc_len; /* calculated length */
- cFYI(0,
- ("Entering checkSMB with Length: %x, smb_buf_length: %x",
- length, len));
+ cFYI(0, ("checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len));
if (((unsigned int)length < 2 + sizeof (struct smb_hdr)) ||
(len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)) {
if ((unsigned int)length < 2 + sizeof (struct smb_hdr)) {
@@ -433,29 +429,36 @@ checkSMB(struct smb_hdr *smb, __u16 mid, int length)
sizeof (struct smb_hdr) - 1)
&& (smb->Status.CifsError != 0)) {
smb->WordCount = 0;
- return 0; /* some error cases do not return wct and bcc */
+ /* some error cases do not return wct and bcc */
+ return 0;
} else {
cERROR(1, ("Length less than smb header size"));
}
-
}
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
- cERROR(1,
- ("smb_buf_length greater than MaxBufSize"));
- cERROR(1,
- ("bad smb detected. Illegal length. mid=%d",
- smb->Mid));
+ cERROR(1, ("smb length greater than MaxBufSize, mid=%d",
+ smb->Mid));
return 1;
}
if (checkSMBhdr(smb, mid))
return 1;
clc_len = smbCalcSize_LE(smb);
- if ((4 + len != clc_len)
- || (4 + len != (unsigned int)length)) {
- cERROR(1, ("Calculated size 0x%x vs actual length 0x%x",
- clc_len, 4 + len));
- cERROR(1, ("bad smb size detected for Mid=%d", smb->Mid));
+
+ if(4 + len != (unsigned int)length) {
+ cERROR(1, ("Length read does not match RFC1001 length %d",len));
+ return 1;
+ }
+
+ if (4 + len != clc_len) {
+ /* check if bcc wrapped around for large read responses */
+ if((len > 64 * 1024) && (len > clc_len)) {
+ /* check if lengths match mod 64K */
+ if(((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
+ return 0; /* bcc wrapped */
+ }
+ cFYI(1, ("Calculated size %d vs length %d mismatch for mid %d",
+ clc_len, 4 + len, smb->Mid));
/* Windows XP can return a few bytes too much, presumably
an illegal pad, at the end of byte range lock responses
so we allow for that three byte pad, as long as actual
@@ -469,8 +472,11 @@ checkSMB(struct smb_hdr *smb, __u16 mid, int length)
wct and bcc to minimum size and drop the t2 parms and data */
if((4+len > clc_len) && (len <= clc_len + 512))
return 0;
- else
+ else {
+ cERROR(1, ("RFC1001 size %d bigger than SMB for Mid=%d",
+ len, smb->Mid));
return 1;
+ }
}
return 0;
}
diff --git a/fs/cifs/ntlmssp.c b/fs/cifs/ntlmssp.c
new file mode 100644
index 0000000000000..78866f9257474
--- /dev/null
+++ b/fs/cifs/ntlmssp.c
@@ -0,0 +1,129 @@
+/*
+ * fs/cifs/ntlmssp.c
+ *
+ * Copyright (c) International Business Machines Corp., 2006
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_unicode.h"
+#include "cifs_debug.h"
+#include "ntlmssp.h"
+#include "nterr.h"
+
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
+{
+ __u32 capabilities = 0;
+
+ /* init fields common to all four types of SessSetup */
+ /* note that header is initialized to zero in header_assemble */
+ pSMB->req.AndXCommand = 0xFF;
+ pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
+ pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
+
+ /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
+
+ /* BB verify whether signing required on neg or just on auth frame
+ (and NTLM case) */
+
+ capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
+ CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
+
+ if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+ if (ses->capabilities & CAP_UNICODE) {
+ pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
+ capabilities |= CAP_UNICODE;
+ }
+ if (ses->capabilities & CAP_STATUS32) {
+ pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
+ capabilities |= CAP_STATUS32;
+ }
+ if (ses->capabilities & CAP_DFS) {
+ pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
+ capabilities |= CAP_DFS;
+ }
+
+ /* BB check whether to init vcnum BB */
+ return capabilities;
+}
+int
+CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, const int type,
+ int * pNTLMv2_flg, const struct nls_table *nls_cp)
+{
+ int rc = 0;
+ int wct;
+ struct smb_hdr *smb_buffer;
+ char *bcc_ptr;
+ SESSION_SETUP_ANDX *pSMB;
+ __u32 capabilities;
+
+ if(ses == NULL)
+ return -EINVAL;
+
+ cFYI(1,("SStp type: %d",type));
+ if(type < CIFS_NTLM) {
+#ifndef CONFIG_CIFS_WEAK_PW_HASH
+ /* LANMAN and plaintext are less secure and off by default.
+ So we make this explicitly be turned on in kconfig (in the
+ build) and turned on at runtime (changed from the default)
+ in proc/fs/cifs or via mount parm. Unfortunately this is
+ needed for old Win (e.g. Win95), some obscure NAS and OS/2 */
+ return -EOPNOTSUPP;
+#endif
+ wct = 10; /* lanman 2 style sessionsetup */
+ } else if(type < CIFS_NTLMSSP_NEG)
+ wct = 13; /* old style NTLM sessionsetup */
+ else /* same size for negotiate or auth, NTLMSSP or extended security */
+ wct = 12;
+
+ rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
+ (void **)&smb_buffer);
+ if(rc)
+ return rc;
+
+ pSMB = (SESSION_SETUP_ANDX *)smb_buffer;
+
+ capabilities = cifs_ssetup_hdr(ses, pSMB);
+ bcc_ptr = pByteArea(smb_buffer);
+ if(type > CIFS_NTLM) {
+ pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+ capabilities |= CAP_EXTENDED_SECURITY;
+ pSMB->req.Capabilities = cpu_to_le32(capabilities);
+ /* BB set password lengths */
+ } else if(type < CIFS_NTLM) /* lanman */ {
+ /* no capabilities flags in old lanman negotiation */
+ /* pSMB->old_req.PasswordLength = */ /* BB fixme BB */
+ } else /* type CIFS_NTLM */ {
+ pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+ pSMB->req_no_secext.CaseInsensitivePasswordLength =
+ cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+ pSMB->req_no_secext.CaseSensitivePasswordLength =
+ cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+ }
+
+
+/* rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buf_type, 0); */
+ /* SMB request buf freed in SendReceive2 */
+
+ return rc;
+}
+#endif /* CONFIG_CIFS_EXPERIMENTAL */
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 803389b64a2c7..d39b712a11c5b 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -1,7 +1,7 @@
/*
* fs/cifs/ntlmssp.h
*
- * Copyright (c) International Business Machines Corp., 2002
+ * Copyright (c) International Business Machines Corp., 2002,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 488bd0d81dcf8..2f6e2825571ec 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -604,7 +604,12 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
cifsFile->search_resume_name = NULL;
if(cifsFile->srch_inf.ntwrk_buf_start) {
cFYI(1,("freeing SMB ff cache buf on search rewind"));
- cifs_buf_release(cifsFile->srch_inf.ntwrk_buf_start);
+ if(cifsFile->srch_inf.smallBuf)
+ cifs_small_buf_release(cifsFile->srch_inf.
+ ntwrk_buf_start);
+ else
+ cifs_buf_release(cifsFile->srch_inf.
+ ntwrk_buf_start);
}
rc = initiate_cifs_search(xid,file);
if(rc) {
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index b12cb8a7da7c8..3da80409466cf 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -309,17 +309,16 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
*pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
- if (ses == NULL) {
- cERROR(1,("Null smb session"));
- return -EIO;
- }
- if(ses->server == NULL) {
- cERROR(1,("Null tcp session"));
+ if ((ses == NULL) || (ses->server == NULL)) {
+ cifs_small_buf_release(in_buf);
+ cERROR(1,("Null session"));
return -EIO;
}
- if(ses->server->tcpStatus == CifsExiting)
+ if(ses->server->tcpStatus == CifsExiting) {
+ cifs_small_buf_release(in_buf);
return -ENOENT;
+ }
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
@@ -346,6 +345,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
} else {
if(ses->server->tcpStatus == CifsExiting) {
spin_unlock(&GlobalMid_Lock);
+ cifs_small_buf_release(in_buf);
return -ENOENT;
}
@@ -385,6 +385,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
midQ = AllocMidQEntry(in_buf, ses);
if (midQ == NULL) {
up(&ses->server->tcpSem);
+ cifs_small_buf_release(in_buf);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
@@ -408,14 +409,18 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
if(rc < 0) {
DeleteMidQEntry(midQ);
up(&ses->server->tcpSem);
+ cifs_small_buf_release(in_buf);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
return rc;
- } else
+ } else {
up(&ses->server->tcpSem);
+ cifs_small_buf_release(in_buf);
+ }
+
if (long_op == -1)
goto cifs_no_response_exit2;
else if (long_op == 2) /* writes past end of file can take loong time */
@@ -543,6 +548,7 @@ cifs_no_response_exit2:
out_unlock2:
up(&ses->server->tcpSem);
+ cifs_small_buf_release(in_buf);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
diff --git a/fs/dcache.c b/fs/dcache.c
index 19458d3995024..940d188e5d14a 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1101,6 +1101,32 @@ next:
}
/**
+ * d_hash_and_lookup - hash the qstr then search for a dentry
+ * @dir: Directory to search in
+ * @name: qstr of name we wish to find
+ *
+ * On hash failure or on lookup failure NULL is returned.
+ */
+struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
+{
+ struct dentry *dentry = NULL;
+
+ /*
+ * Check for a fs-specific hash function. Note that we must
+ * calculate the standard hash first, as the d_op->d_hash()
+ * routine may choose to leave the hash value unchanged.
+ */
+ name->hash = full_name_hash(name->name, name->len);
+ if (dir->d_op && dir->d_op->d_hash) {
+ if (dir->d_op->d_hash(dir, name) < 0)
+ goto out;
+ }
+ dentry = d_lookup(dir, name);
+out:
+ return dentry;
+}
+
+/**
* d_validate - verify dentry provided from insecure source
* @dentry: The dentry alleged to be valid child of @dparent
* @dparent: The parent dentry (known to be valid)
@@ -1172,11 +1198,11 @@ void d_delete(struct dentry * dentry)
spin_lock(&dentry->d_lock);
isdir = S_ISDIR(dentry->d_inode->i_mode);
if (atomic_read(&dentry->d_count) == 1) {
- /* remove this and other inotify debug checks after 2.6.18 */
- dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
-
dentry_iput(dentry);
fsnotify_nameremove(dentry, isdir);
+
+ /* remove this and other inotify debug checks after 2.6.18 */
+ dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
return;
}
@@ -1616,26 +1642,12 @@ ino_t find_inode_number(struct dentry *dir, struct qstr *name)
struct dentry * dentry;
ino_t ino = 0;
- /*
- * Check for a fs-specific hash function. Note that we must
- * calculate the standard hash first, as the d_op->d_hash()
- * routine may choose to leave the hash value unchanged.
- */
- name->hash = full_name_hash(name->name, name->len);
- if (dir->d_op && dir->d_op->d_hash)
- {
- if (dir->d_op->d_hash(dir, name) != 0)
- goto out;
- }
-
- dentry = d_lookup(dir, name);
- if (dentry)
- {
+ dentry = d_hash_and_lookup(dir, name);
+ if (dentry) {
if (dentry->d_inode)
ino = dentry->d_inode->i_ino;
dput(dentry);
}
-out:
return ino;
}
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 910a8ed74b5d2..b05d1b2187769 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -929,8 +929,7 @@ do_holes:
block_in_page += this_chunk_blocks;
dio->blocks_available -= this_chunk_blocks;
next_block:
- if (dio->block_in_file > dio->final_block_in_request)
- BUG();
+ BUG_ON(dio->block_in_file > dio->final_block_in_request);
if (dio->block_in_file == dio->final_block_in_request)
break;
}
diff --git a/fs/dquot.c b/fs/dquot.c
index 6b38869209397..81d87a413c68c 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -590,8 +590,7 @@ we_slept:
atomic_dec(&dquot->dq_count);
#ifdef __DQUOT_PARANOIA
/* sanity check */
- if (!list_empty(&dquot->dq_free))
- BUG();
+ BUG_ON(!list_empty(&dquot->dq_free));
#endif
put_dquot_last(dquot);
spin_unlock(&dq_list_lock);
@@ -666,8 +665,7 @@ we_slept:
return NODQUOT;
}
#ifdef __DQUOT_PARANOIA
- if (!dquot->dq_sb) /* Has somebody invalidated entry under us? */
- BUG();
+ BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
#endif
return dquot;
diff --git a/fs/exec.c b/fs/exec.c
index 950ebd43cdc35..0291a68a36261 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -561,7 +561,7 @@ static int exec_mmap(struct mm_struct *mm)
arch_pick_mmap_layout(mm);
if (old_mm) {
up_read(&old_mm->mmap_sem);
- if (active_mm != old_mm) BUG();
+ BUG_ON(active_mm != old_mm);
mmput(old_mm);
return 0;
}
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 2a2479196f965..d35cbc6bc1120 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -453,8 +453,7 @@ static void send_sigio_to_task(struct task_struct *p,
/* Make sure we are called with one of the POLL_*
reasons, otherwise we could leak kernel stack into
userspace. */
- if ((reason & __SI_MASK) != __SI_POLL)
- BUG();
+ BUG_ON((reason & __SI_MASK) != __SI_POLL);
if (reason - POLL_IN >= NSIGPOLL)
si.si_band = ~0L;
else
diff --git a/fs/freevxfs/vxfs_olt.c b/fs/freevxfs/vxfs_olt.c
index 76a0708ae9789..0495008479034 100644
--- a/fs/freevxfs/vxfs_olt.c
+++ b/fs/freevxfs/vxfs_olt.c
@@ -42,24 +42,21 @@
static inline void
vxfs_get_fshead(struct vxfs_oltfshead *fshp, struct vxfs_sb_info *infp)
{
- if (infp->vsi_fshino)
- BUG();
+ BUG_ON(infp->vsi_fshino);
infp->vsi_fshino = fshp->olt_fsino[0];
}
static inline void
vxfs_get_ilist(struct vxfs_oltilist *ilistp, struct vxfs_sb_info *infp)
{
- if (infp->vsi_iext)
- BUG();
+ BUG_ON(infp->vsi_iext);
infp->vsi_iext = ilistp->olt_iext[0];
}
static inline u_long
vxfs_oblock(struct super_block *sbp, daddr_t block, u_long bsize)
{
- if (sbp->s_blocksize % bsize)
- BUG();
+ BUG_ON(sbp->s_blocksize % bsize);
return (block * (sbp->s_blocksize / bsize));
}
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 8f07e8fbd03d3..746abc9ecf70f 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -466,8 +466,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
*p && *p != node; p = &(*p)->next_hash)
;
- if (!*p)
- BUG();
+ BUG_ON(!*p);
*p = node->next_hash;
node->tree->node_hash_cnt--;
}
@@ -622,8 +621,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
node->tree->cnid, node->this, atomic_read(&node->refcnt));
- if (!atomic_read(&node->refcnt))
- BUG();
+ BUG_ON(!atomic_read(&node->refcnt));
if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
return;
for (i = 0; i < tree->pages_per_bnode; i++) {
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index a67edfa34e9ec..effa8991999c9 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -269,8 +269,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
u8 *data, byte, m;
dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
- if (!node->this)
- BUG();
+ BUG_ON(!node->this);
tree = node->tree;
nidx = node->this;
node = hfs_bnode_find(tree, 0);
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
index 2ba20cdb5baa7..5e6363be246f6 100644
--- a/fs/hppfs/hppfs_kern.c
+++ b/fs/hppfs/hppfs_kern.c
@@ -216,10 +216,10 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
static struct inode_operations hppfs_file_iops = {
};
-static ssize_t read_proc(struct file *file, char *buf, ssize_t count,
+static ssize_t read_proc(struct file *file, char __user *buf, ssize_t count,
loff_t *ppos, int is_user)
{
- ssize_t (*read)(struct file *, char *, size_t, loff_t *);
+ ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
ssize_t n;
read = file->f_dentry->d_inode->i_fop->read;
@@ -236,7 +236,7 @@ static ssize_t read_proc(struct file *file, char *buf, ssize_t count,
return n;
}
-static ssize_t hppfs_read_file(int fd, char *buf, ssize_t count)
+static ssize_t hppfs_read_file(int fd, char __user *buf, ssize_t count)
{
ssize_t n;
int cur, err;
@@ -274,7 +274,7 @@ static ssize_t hppfs_read_file(int fd, char *buf, ssize_t count)
return n;
}
-static ssize_t hppfs_read(struct file *file, char *buf, size_t count,
+static ssize_t hppfs_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
struct hppfs_private *hppfs = file->private_data;
@@ -313,12 +313,12 @@ static ssize_t hppfs_read(struct file *file, char *buf, size_t count,
return(count);
}
-static ssize_t hppfs_write(struct file *file, const char *buf, size_t len,
+static ssize_t hppfs_write(struct file *file, const char __user *buf, size_t len,
loff_t *ppos)
{
struct hppfs_private *data = file->private_data;
struct file *proc_file = data->proc_file;
- ssize_t (*write)(struct file *, const char *, size_t, loff_t *);
+ ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
int err;
write = proc_file->f_dentry->d_inode->i_fop->write;
@@ -658,7 +658,7 @@ static struct super_operations hppfs_sbops = {
.statfs = hppfs_statfs,
};
-static int hppfs_readlink(struct dentry *dentry, char *buffer, int buflen)
+static int hppfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct file *proc_file;
struct dentry *proc_dentry;
diff --git a/fs/inode.c b/fs/inode.c
index 32b7c3375021f..3a2446a27d2c2 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -172,8 +172,7 @@ static struct inode *alloc_inode(struct super_block *sb)
void destroy_inode(struct inode *inode)
{
- if (inode_has_buffers(inode))
- BUG();
+ BUG_ON(inode_has_buffers(inode));
security_inode_free(inode);
if (inode->i_sb->s_op->destroy_inode)
inode->i_sb->s_op->destroy_inode(inode);
@@ -249,12 +248,9 @@ void clear_inode(struct inode *inode)
might_sleep();
invalidate_inode_buffers(inode);
- if (inode->i_data.nrpages)
- BUG();
- if (!(inode->i_state & I_FREEING))
- BUG();
- if (inode->i_state & I_CLEAR)
- BUG();
+ BUG_ON(inode->i_data.nrpages);
+ BUG_ON(!(inode->i_state & I_FREEING));
+ BUG_ON(inode->i_state & I_CLEAR);
wait_on_inode(inode);
DQUOT_DROP(inode);
if (inode->i_sb && inode->i_sb->s_op->clear_inode)
@@ -1054,8 +1050,7 @@ void generic_delete_inode(struct inode *inode)
hlist_del_init(&inode->i_hash);
spin_unlock(&inode_lock);
wake_up_inode(inode);
- if (inode->i_state != I_CLEAR)
- BUG();
+ BUG_ON(inode->i_state != I_CLEAR);
destroy_inode(inode);
}
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 7b77a9541125b..ff2a872e80e78 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -35,8 +35,7 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
pid_t pid;
int ret = 0;
- if (c->gc_task)
- BUG();
+ BUG_ON(c->gc_task);
init_completion(&c->gc_thread_start);
init_completion(&c->gc_thread_exit);
diff --git a/fs/locks.c b/fs/locks.c
index 4d9e71d43e7e2..dda83d6cd48b1 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -168,18 +168,9 @@ static void locks_release_private(struct file_lock *fl)
/* Free a lock which is not in use. */
static void locks_free_lock(struct file_lock *fl)
{
- if (fl == NULL) {
- BUG();
- return;
- }
- if (waitqueue_active(&fl->fl_wait))
- panic("Attempting to free lock with active wait queue");
-
- if (!list_empty(&fl->fl_block))
- panic("Attempting to free lock with active block list");
-
- if (!list_empty(&fl->fl_link))
- panic("Attempting to free lock on active lock list");
+ BUG_ON(waitqueue_active(&fl->fl_wait));
+ BUG_ON(!list_empty(&fl->fl_block));
+ BUG_ON(!list_empty(&fl->fl_link));
locks_release_private(fl);
kmem_cache_free(filelock_cache, fl);
@@ -735,8 +726,9 @@ EXPORT_SYMBOL(posix_locks_deadlock);
* at the head of the list, but that's secret knowledge known only to
* flock_lock_file and posix_lock_file.
*/
-static int flock_lock_file(struct file *filp, struct file_lock *new_fl)
+static int flock_lock_file(struct file *filp, struct file_lock *request)
{
+ struct file_lock *new_fl = NULL;
struct file_lock **before;
struct inode * inode = filp->f_dentry->d_inode;
int error = 0;
@@ -751,17 +743,19 @@ static int flock_lock_file(struct file *filp, struct file_lock *new_fl)
continue;
if (filp != fl->fl_file)
continue;
- if (new_fl->fl_type == fl->fl_type)
+ if (request->fl_type == fl->fl_type)
goto out;
found = 1;
locks_delete_lock(before);
break;
}
- unlock_kernel();
- if (new_fl->fl_type == F_UNLCK)
- return 0;
+ if (request->fl_type == F_UNLCK)
+ goto out;
+ new_fl = locks_alloc_lock();
+ if (new_fl == NULL)
+ goto out;
/*
* If a higher-priority process was blocked on the old file lock,
* give it the opportunity to lock the file.
@@ -769,26 +763,27 @@ static int flock_lock_file(struct file *filp, struct file_lock *new_fl)
if (found)
cond_resched();
- lock_kernel();
for_each_lock(inode, before) {
struct file_lock *fl = *before;
if (IS_POSIX(fl))
break;
if (IS_LEASE(fl))
continue;
- if (!flock_locks_conflict(new_fl, fl))
+ if (!flock_locks_conflict(request, fl))
continue;
error = -EAGAIN;
- if (new_fl->fl_flags & FL_SLEEP) {
- locks_insert_block(fl, new_fl);
- }
+ if (request->fl_flags & FL_SLEEP)
+ locks_insert_block(fl, request);
goto out;
}
+ locks_copy_lock(new_fl, request);
locks_insert_lock(&inode->i_flock, new_fl);
- error = 0;
+ new_fl = NULL;
out:
unlock_kernel();
+ if (new_fl)
+ locks_free_lock(new_fl);
return error;
}
@@ -1569,9 +1564,7 @@ asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
error = flock_lock_file_wait(filp, lock);
out_free:
- if (list_empty(&lock->fl_link)) {
- locks_free_lock(lock);
- }
+ locks_free_lock(lock);
out_putf:
fput(filp);
diff --git a/fs/msdos/namei.c b/fs/msdos/namei.c
index 626a367bcd811..5b76ccd19e3fc 100644
--- a/fs/msdos/namei.c
+++ b/fs/msdos/namei.c
@@ -12,14 +12,6 @@
#include <linux/msdos_fs.h>
#include <linux/smp_lock.h>
-/* MS-DOS "device special files" */
-static const unsigned char *reserved_names[] = {
- "CON ", "PRN ", "NUL ", "AUX ",
- "LPT1 ", "LPT2 ", "LPT3 ", "LPT4 ",
- "COM1 ", "COM2 ", "COM3 ", "COM4 ",
- NULL
-};
-
/* Characters that are undesirable in an MS-DOS file name */
static unsigned char bad_chars[] = "*?<>|\"";
static unsigned char bad_if_strict_pc[] = "+=,; ";
@@ -40,7 +32,6 @@ static int msdos_format_name(const unsigned char *name, int len,
*/
{
unsigned char *walk;
- const unsigned char **reserved;
unsigned char c;
int space;
@@ -127,11 +118,7 @@ static int msdos_format_name(const unsigned char *name, int len,
}
while (walk - res < MSDOS_NAME)
*walk++ = ' ';
- if (!opts->atari)
- /* GEMDOS is less stupid and has no reserved names */
- for (reserved = reserved_names; *reserved; reserved++)
- if (!strncmp(res, *reserved, 8))
- return -EINVAL;
+
return 0;
}
diff --git a/fs/namei.c b/fs/namei.c
index 22f6e8d16aa85..96723ae83c89d 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1254,7 +1254,7 @@ out:
return dentry;
}
-struct dentry * lookup_hash(struct nameidata *nd)
+static struct dentry *lookup_hash(struct nameidata *nd)
{
return __lookup_hash(&nd->last, nd->dentry, nd);
}
@@ -2697,7 +2697,6 @@ EXPORT_SYMBOL(follow_up);
EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
EXPORT_SYMBOL(getname);
EXPORT_SYMBOL(lock_rename);
-EXPORT_SYMBOL(lookup_hash);
EXPORT_SYMBOL(lookup_one_len);
EXPORT_SYMBOL(page_follow_link_light);
EXPORT_SYMBOL(page_put_link);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8f1f49ceebec5..a3a3eecef6894 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -534,12 +534,15 @@ static int proc_oom_score(struct task_struct *task, char *buffer)
/* If the process being read is separated by chroot from the reading process,
* don't let the reader access the threads.
+ *
+ * note: this does dput(root) and mntput(vfsmnt) on exit.
*/
static int proc_check_chroot(struct dentry *root, struct vfsmount *vfsmnt)
{
struct dentry *de, *base;
struct vfsmount *our_vfsmnt, *mnt;
int res = 0;
+
read_lock(&current->fs->lock);
our_vfsmnt = mntget(current->fs->rootmnt);
base = dget(current->fs->root);
@@ -549,11 +552,11 @@ static int proc_check_chroot(struct dentry *root, struct vfsmount *vfsmnt)
de = root;
mnt = vfsmnt;
- while (vfsmnt != our_vfsmnt) {
- if (vfsmnt == vfsmnt->mnt_parent)
+ while (mnt != our_vfsmnt) {
+ if (mnt == mnt->mnt_parent)
goto out;
- de = vfsmnt->mnt_mountpoint;
- vfsmnt = vfsmnt->mnt_parent;
+ de = mnt->mnt_mountpoint;
+ mnt = mnt->mnt_parent;
}
if (!is_subdir(de, base))
@@ -564,7 +567,7 @@ exit:
dput(base);
mntput(our_vfsmnt);
dput(root);
- mntput(mnt);
+ mntput(vfsmnt);
return res;
out:
spin_unlock(&vfsmount_lock);
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index ef5a3323f4b55..5c10ea1574251 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -249,144 +249,60 @@ static int cpuinfo_open(struct inode *inode, struct file *file)
return seq_open(file, &cpuinfo_op);
}
-enum devinfo_states {
- CHR_HDR,
- CHR_LIST,
- BLK_HDR,
- BLK_LIST,
- DEVINFO_DONE
-};
-
-struct devinfo_state {
- void *chrdev;
- void *blkdev;
- unsigned int num_records;
- unsigned int cur_record;
- enum devinfo_states state;
+static struct file_operations proc_cpuinfo_operations = {
+ .open = cpuinfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
};
-static void *devinfo_start(struct seq_file *f, loff_t *pos)
+static int devinfo_show(struct seq_file *f, void *v)
{
- struct devinfo_state *info = f->private;
+ int i = *(loff_t *) v;
- if (*pos) {
- if ((info) && (*pos <= info->num_records))
- return info;
- return NULL;
+ if (i < CHRDEV_MAJOR_HASH_SIZE) {
+ if (i == 0)
+ seq_printf(f, "Character devices:\n");
+ chrdev_show(f, i);
+ } else {
+ i -= CHRDEV_MAJOR_HASH_SIZE;
+ if (i == 0)
+ seq_printf(f, "\nBlock devices:\n");
+ blkdev_show(f, i);
}
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- f->private = info;
- info->chrdev = acquire_chrdev_list();
- info->blkdev = acquire_blkdev_list();
- info->state = CHR_HDR;
- info->num_records = count_chrdev_list();
- info->num_records += count_blkdev_list();
- info->num_records += 2; /* Character and Block headers */
- *pos = 1;
- info->cur_record = *pos;
- return info;
+ return 0;
}
-static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos)
+static void *devinfo_start(struct seq_file *f, loff_t *pos)
{
- int idummy;
- char *ndummy;
- struct devinfo_state *info = f->private;
-
- switch (info->state) {
- case CHR_HDR:
- info->state = CHR_LIST;
- (*pos)++;
- /*fallthrough*/
- case CHR_LIST:
- if (get_chrdev_info(info->chrdev,&idummy,&ndummy)) {
- /*
- * The character dev list is complete
- */
- info->state = BLK_HDR;
- } else {
- info->chrdev = get_next_chrdev(info->chrdev);
- }
- (*pos)++;
- break;
- case BLK_HDR:
- info->state = BLK_LIST;
- (*pos)++;
- /*fallthrough*/
- case BLK_LIST:
- if (get_blkdev_info(info->blkdev,&idummy,&ndummy)) {
- /*
- * The block dev list is complete
- */
- info->state = DEVINFO_DONE;
- } else {
- info->blkdev = get_next_blkdev(info->blkdev);
- }
- (*pos)++;
- break;
- case DEVINFO_DONE:
- (*pos)++;
- info->cur_record = *pos;
- info = NULL;
- break;
- default:
- break;
- }
- if (info)
- info->cur_record = *pos;
- return info;
+ if (*pos < (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE))
+ return pos;
+ return NULL;
}
-static void devinfo_stop(struct seq_file *f, void *v)
+static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos)
{
- struct devinfo_state *info = f->private;
-
- if (info) {
- release_chrdev_list(info->chrdev);
- release_blkdev_list(info->blkdev);
- f->private = NULL;
- kfree(info);
- }
+ (*pos)++;
+ if (*pos >= (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE))
+ return NULL;
+ return pos;
}
-static int devinfo_show(struct seq_file *f, void *arg)
-{
- int major;
- char *name;
- struct devinfo_state *info = f->private;
-
- switch(info->state) {
- case CHR_HDR:
- seq_printf(f,"Character devices:\n");
- /* fallthrough */
- case CHR_LIST:
- if (!get_chrdev_info(info->chrdev,&major,&name))
- seq_printf(f,"%3d %s\n",major,name);
- break;
- case BLK_HDR:
- seq_printf(f,"\nBlock devices:\n");
- /* fallthrough */
- case BLK_LIST:
- if (!get_blkdev_info(info->blkdev,&major,&name))
- seq_printf(f,"%3d %s\n",major,name);
- break;
- default:
- break;
- }
-
- return 0;
+static void devinfo_stop(struct seq_file *f, void *v)
+{
+ /* Nothing to do */
}
-static struct seq_operations devinfo_op = {
- .start = devinfo_start,
- .next = devinfo_next,
- .stop = devinfo_stop,
- .show = devinfo_show,
+static struct seq_operations devinfo_ops = {
+ .start = devinfo_start,
+ .next = devinfo_next,
+ .stop = devinfo_stop,
+ .show = devinfo_show
};
-static int devinfo_open(struct inode *inode, struct file *file)
+static int devinfo_open(struct inode *inode, struct file *filp)
{
- return seq_open(file, &devinfo_op);
+ return seq_open(filp, &devinfo_ops);
}
static struct file_operations proc_devinfo_operations = {
@@ -396,13 +312,6 @@ static struct file_operations proc_devinfo_operations = {
.release = seq_release,
};
-static struct file_operations proc_cpuinfo_operations = {
- .open = cpuinfo_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
extern struct seq_operations vmstat_op;
static int vmstat_open(struct inode *inode, struct file *file)
{
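
The rewritten /proc/devices code above treats the seq_file position as a plain index into the character- and block-major hash tables, so no per-open state is needed. A hedged, stand-alone sketch of the same array-indexed seq_file idiom (the example_* names are hypothetical, not part of the patch):

#include <linux/kernel.h>
#include <linux/seq_file.h>

static const char *example_items[] = { "alpha", "beta", "gamma" };
#define EXAMPLE_NR ARRAY_SIZE(example_items)

static void *example_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < EXAMPLE_NR) ? pos : NULL;
}

static void *example_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	return (*pos < EXAMPLE_NR) ? pos : NULL;
}

static void example_stop(struct seq_file *f, void *v)
{
	/* nothing to release: no per-open state was allocated */
}

static int example_show(struct seq_file *f, void *v)
{
	int i = *(loff_t *)v;

	seq_printf(f, "%d %s\n", i, example_items[i]);
	return 0;
}

static struct seq_operations example_ops = {
	.start	= example_start,
	.next	= example_next,
	.stop	= example_stop,
	.show	= example_show,
};
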
diff --git a/fs/select.c b/fs/select.c
index b3a3a1326af66..071660fa7b01e 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -314,7 +314,7 @@ static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
int ret, size, max_fdset;
struct fdtable *fdt;
/* Allocate small arguments on the stack to save memory and be faster */
- char stack_fds[SELECT_STACK_ALLOC];
+ long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
ret = -EINVAL;
if (n < 0)
@@ -639,8 +639,10 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout)
struct poll_list *walk;
struct fdtable *fdt;
int max_fdset;
- /* Allocate small arguments on the stack to save memory and be faster */
- char stack_pps[POLL_STACK_ALLOC];
+ /* Allocate small arguments on the stack to save memory and be
+ faster - use long to make sure the buffer is aligned properly
+ on 64 bit archs to avoid unaligned access */
+ long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
struct poll_list *stack_pp = NULL;
/* Do a sanity check on nfds ... */
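
The two hunks above are alignment fixes, not functional changes: a char array on the stack may begin at any byte, whereas a long array begins on a sizeof(long) boundary, so structures later carved out of the buffer are naturally aligned on 64-bit machines. A minimal stand-alone illustration of the idiom (hypothetical names, user-space C, not taken from the patch):

#include <assert.h>
#include <stdint.h>

#define SCRATCH_BYTES 256		/* hypothetical buffer size */

struct entry {				/* stand-in for struct poll_list */
	struct entry *next;
	long payload;
};

int main(void)
{
	/*
	 * char scratch[SCRATCH_BYTES] could start at any address;
	 * declaring it as long[] guarantees sizeof(long) alignment.
	 */
	long scratch[SCRATCH_BYTES / sizeof(long)];
	struct entry *e = (struct entry *)scratch;

	assert((uintptr_t)e % sizeof(long) == 0);	/* always holds */
	e->payload = 42;				/* aligned access */
	return 0;
}
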
diff --git a/fs/smbfs/file.c b/fs/smbfs/file.c
index c56bd99a97018..ed9a24d19d7d3 100644
--- a/fs/smbfs/file.c
+++ b/fs/smbfs/file.c
@@ -178,11 +178,9 @@ smb_writepage(struct page *page, struct writeback_control *wbc)
unsigned offset = PAGE_CACHE_SIZE;
int err;
- if (!mapping)
- BUG();
+ BUG_ON(!mapping);
inode = mapping->host;
- if (!inode)
- BUG();
+ BUG_ON(!inode);
end_index = inode->i_size >> PAGE_CACHE_SHIFT;
diff --git a/fs/splice.c b/fs/splice.c
index 7c2bbf18d7a7c..6081cf7d2d1ba 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -106,7 +106,7 @@ static struct pipe_buf_operations page_cache_pipe_buf_ops = {
static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
int nr_pages, unsigned long offset,
- unsigned long len)
+ unsigned long len, unsigned int flags)
{
struct pipe_inode_info *info;
int ret, do_wakeup, i;
@@ -159,6 +159,12 @@ static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
break;
}
+ if (flags & SPLICE_F_NONBLOCK) {
+ if (!ret)
+ ret = -EAGAIN;
+ break;
+ }
+
if (signal_pending(current)) {
if (!ret)
ret = -ERESTARTSYS;
@@ -191,7 +197,7 @@ static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
}
static int __generic_file_splice_read(struct file *in, struct inode *pipe,
- size_t len)
+ size_t len, unsigned int flags)
{
struct address_space *mapping = in->f_mapping;
unsigned int offset, nr_pages;
@@ -279,7 +285,7 @@ static int __generic_file_splice_read(struct file *in, struct inode *pipe,
* Now we splice them into the pipe..
*/
splice_them:
- return move_to_pipe(pipe, pages, i, offset, len);
+ return move_to_pipe(pipe, pages, i, offset, len, flags);
}
ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
@@ -291,7 +297,7 @@ ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
ret = 0;
spliced = 0;
while (len) {
- ret = __generic_file_splice_read(in, pipe, len);
+ ret = __generic_file_splice_read(in, pipe, len, flags);
if (ret <= 0)
break;
@@ -299,6 +305,11 @@ ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
in->f_pos += ret;
len -= ret;
spliced += ret;
+
+ if (!(flags & SPLICE_F_NONBLOCK))
+ continue;
+ ret = -EAGAIN;
+ break;
}
if (spliced)
@@ -527,6 +538,12 @@ static ssize_t move_from_pipe(struct inode *inode, struct file *out,
break;
}
+ if (flags & SPLICE_F_NONBLOCK) {
+ if (!ret)
+ ret = -EAGAIN;
+ break;
+ }
+
if (signal_pending(current)) {
if (!ret)
ret = -ERESTARTSYS;
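
With SPLICE_F_NONBLOCK set, the move_to_pipe()/move_from_pipe() loops above return -EAGAIN instead of sleeping when the pipe is full or empty, and generic_file_splice_read() stops after the first short transfer. A hedged user-space sketch of the resulting calling convention, assuming a splice(2) wrapper with the usual (fd_in, off_in, fd_out, off_out, len, flags) signature; the wrapper and the function shown are illustrative, not part of this patch:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

/* Move a file's contents into a pipe without ever blocking in splice(). */
static int drain_to_pipe(int file_fd, int pipe_fd)
{
	for (;;) {
		ssize_t n = splice(file_fd, NULL, pipe_fd, NULL, 65536,
				   SPLICE_F_NONBLOCK);

		if (n > 0)
			continue;		/* moved n bytes; try for more */
		if (n == 0)
			return 0;		/* end of file */
		if (errno != EAGAIN)
			return -1;		/* real error */

		/* pipe is full: wait for the reader, then retry */
		struct pollfd pfd = { .fd = pipe_fd, .events = POLLOUT };
		poll(&pfd, 1, -1);
	}
}
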
diff --git a/fs/sync.c b/fs/sync.c
new file mode 100644
index 0000000000000..8616006d20948
--- /dev/null
+++ b/fs/sync.c
@@ -0,0 +1,164 @@
+/*
+ * High-level sync()-related operations
+ */
+
+#include <linux/kernel.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/writeback.h>
+#include <linux/syscalls.h>
+#include <linux/linkage.h>
+#include <linux/pagemap.h>
+
+#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
+ SYNC_FILE_RANGE_WAIT_AFTER)
+
+/*
+ * sys_sync_file_range() permits finely controlled syncing over a segment of
+ * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
+ * zero then sys_sync_file_range() will operate from offset out to EOF.
+ *
+ * The flag bits are:
+ *
+ * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
+ * before performing the write.
+ *
+ * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
+ * range which are not presently under writeback.
+ *
+ * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
+ * after performing the write.
+ *
+ * Useful combinations of the flag bits are:
+ *
+ * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
+ * in the range which were dirty on entry to sys_sync_file_range() are placed
+ * under writeout. This is a start-write-for-data-integrity operation.
+ *
+ * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
+ * are not presently under writeout. This is an asynchronous flush-to-disk
+ * operation. Not suitable for data integrity operations.
+ *
+ * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
+ * completion of writeout of all pages in the range. This will be used after an
+ * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
+ * for that operation to complete and to return the result.
+ *
+ * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
+ * a traditional sync() operation. This is a write-for-data-integrity operation
+ * which will ensure that all pages in the range which were dirty on entry to
+ * sys_sync_file_range() are committed to disk.
+ *
+ *
+ * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
+ * I/O errors or ENOSPC conditions and will return those to the caller, after
+ * clearing the EIO and ENOSPC flags in the address_space.
+ *
+ * It should be noted that none of these operations write out the file's
+ * metadata. So unless the application is strictly performing overwrites of
+ * already-instantiated disk blocks, there are no guarantees here that the data
+ * will be available after a crash.
+ */
+asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
+ int flags)
+{
+ int ret;
+ struct file *file;
+ loff_t endbyte; /* inclusive */
+ int fput_needed;
+ umode_t i_mode;
+
+ ret = -EINVAL;
+ if (flags & ~VALID_FLAGS)
+ goto out;
+
+ endbyte = offset + nbytes;
+
+ if ((s64)offset < 0)
+ goto out;
+ if ((s64)endbyte < 0)
+ goto out;
+ if (endbyte < offset)
+ goto out;
+
+ if (sizeof(pgoff_t) == 4) {
+ if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
+ /*
+ * The range starts outside a 32 bit machine's
+ * pagecache addressing capabilities. Let it "succeed"
+ */
+ ret = 0;
+ goto out;
+ }
+ if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
+ /*
+ * Out to EOF
+ */
+ nbytes = 0;
+ }
+ }
+
+ if (nbytes == 0)
+ endbyte = -1;
+ else
+ endbyte--; /* inclusive */
+
+ ret = -EBADF;
+ file = fget_light(fd, &fput_needed);
+ if (!file)
+ goto out;
+
+ i_mode = file->f_dentry->d_inode->i_mode;
+ ret = -ESPIPE;
+ if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
+ !S_ISLNK(i_mode))
+ goto out_put;
+
+ ret = do_sync_file_range(file, offset, endbyte, flags);
+out_put:
+ fput_light(file, fput_needed);
+out:
+ return ret;
+}
+
+/*
+ * `endbyte' is inclusive
+ */
+int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte,
+ int flags)
+{
+ int ret;
+ struct address_space *mapping;
+
+ mapping = file->f_mapping;
+ if (!mapping) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = 0;
+ if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
+ ret = wait_on_page_writeback_range(mapping,
+ offset >> PAGE_CACHE_SHIFT,
+ endbyte >> PAGE_CACHE_SHIFT);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (flags & SYNC_FILE_RANGE_WRITE) {
+ ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
+ WB_SYNC_NONE);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (flags & SYNC_FILE_RANGE_WAIT_AFTER) {
+ ret = wait_on_page_writeback_range(mapping,
+ offset >> PAGE_CACHE_SHIFT,
+ endbyte >> PAGE_CACHE_SHIFT);
+ }
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(do_sync_file_range);
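
A hedged user-space sketch of the flag combinations documented above, assuming a sync_file_range(2) wrapper with the (fd, offset, nbytes, flags) signature (glibc grew such a wrapper later); the flag values below match the linux/fs.h additions elsewhere in this patch:

#define _GNU_SOURCE
#include <fcntl.h>

#ifndef SYNC_FILE_RANGE_WAIT_BEFORE
#define SYNC_FILE_RANGE_WAIT_BEFORE	1
#define SYNC_FILE_RANGE_WRITE		2
#define SYNC_FILE_RANGE_WAIT_AFTER	4
#endif

/*
 * Flush one already-written extent of fd to disk using the
 * "traditional sync" combination: wait for writeback already in
 * flight, start writeback of the dirty pages, then wait for that
 * writeback to complete.  Note that file metadata is NOT written.
 */
static int flush_extent_for_integrity(int fd, off64_t offset, off64_t nbytes)
{
	return sync_file_range(fd, offset, nbytes,
			       SYNC_FILE_RANGE_WAIT_BEFORE |
			       SYNC_FILE_RANGE_WRITE |
			       SYNC_FILE_RANGE_WAIT_AFTER);
}
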
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index f26880a4785ee..6cfdc9a87772b 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -50,7 +50,7 @@ static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent * parent_sd,
return sd;
}
-/**
+/*
*
* Return -EEXIST if there is already a sysfs element with the same name for
* the same parent.
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 830f76fa098c5..f1cb1ddde511d 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -183,7 +183,7 @@ fill_write_buffer(struct sysfs_buffer * buffer, const char __user * buf, size_t
return -ENOMEM;
if (count >= PAGE_SIZE)
- count = PAGE_SIZE;
+ count = PAGE_SIZE - 1;
error = copy_from_user(buffer->page,buf,count);
buffer->needs_read_fill = 1;
return error ? -EFAULT : count;
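
The PAGE_SIZE - 1 cap above matters because fill_write_buffer() copies into a zero-filled page: leaving the last byte untouched keeps the buffer NUL-terminated, so attribute store methods can parse it with ordinary string helpers. A hedged sketch of a typical sysfs store method that relies on this (the attribute and its consumer are hypothetical):

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t example_store(struct kobject *kobj, struct attribute *attr,
			     const char *buf, size_t count)
{
	/* buf is NUL-terminated thanks to the PAGE_SIZE - 1 cap */
	unsigned long val = simple_strtoul(buf, NULL, 10);

	example_set_value(val);		/* hypothetical consumer */
	return count;
}
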
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 4c29ac41ac3ec..f0b347bd12ca5 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -175,8 +175,7 @@ const unsigned char * sysfs_get_name(struct sysfs_dirent *sd)
struct bin_attribute * bin_attr;
struct sysfs_symlink * sl;
- if (!sd || !sd->s_element)
- BUG();
+ BUG_ON(!sd || !sd->s_element);
switch (sd->s_type) {
case SYSFS_DIR:
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 8c66e9270dd64..d7074341ee87e 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -253,8 +253,7 @@ int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
lock_page(page);
err = mapping->a_ops->prepare_write(NULL, page, from, to);
- if (err)
- BUG();
+ BUG_ON(err);
de->inode = 0;
err = dir_commit_chunk(page, from, to);
dir_put_page(page);
@@ -353,8 +352,7 @@ void sysv_set_link(struct sysv_dir_entry *de, struct page *page,
lock_page(page);
err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
- if (err)
- BUG();
+ BUG_ON(err);
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
err = dir_commit_chunk(page, from, to);
dir_put_page(page);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 81e0e8459af11..2983afd5e7fd4 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -312,12 +312,10 @@ static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head
err = 0;
bh = inode_getblk(inode, block, &err, &phys, &new);
- if (bh)
- BUG();
+ BUG_ON(bh);
if (err)
goto abort;
- if (!phys)
- BUG();
+ BUG_ON(!phys);
if (new)
set_buffer_new(bh_result);
diff --git a/fs/vfat/namei.c b/fs/vfat/namei.c
index ef46939c0c1a9..a56cec3be5f09 100644
--- a/fs/vfat/namei.c
+++ b/fs/vfat/namei.c
@@ -185,24 +185,6 @@ static int vfat_valid_longname(const unsigned char *name, unsigned int len)
return -EINVAL;
if (len >= 256)
return -ENAMETOOLONG;
-
- /* MS-DOS "device special files" */
- if (len == 3 || (len > 3 && name[3] == '.')) { /* basename == 3 */
- if (!strnicmp(name, "aux", 3) ||
- !strnicmp(name, "con", 3) ||
- !strnicmp(name, "nul", 3) ||
- !strnicmp(name, "prn", 3))
- return -EINVAL;
- }
- if (len == 4 || (len > 4 && name[4] == '.')) { /* basename == 4 */
- /* "com1", "com2", ... */
- if ('1' <= name[3] && name[3] <= '9') {
- if (!strnicmp(name, "com", 3) ||
- !strnicmp(name, "lpt", 3))
- return -EINVAL;
- }
- }
-
return 0;
}
diff --git a/include/asm-arm/arch-at91rm9200/hardware.h b/include/asm-arm/arch-at91rm9200/hardware.h
index 2646c01f8e97d..59e6f44d3a0db 100644
--- a/include/asm-arm/arch-at91rm9200/hardware.h
+++ b/include/asm-arm/arch-at91rm9200/hardware.h
@@ -65,6 +65,9 @@
/* SmartMedia */
#define AT91_SMARTMEDIA_BASE 0x40000000 /* NCS3: Smartmedia physical base address */
+/* Compact Flash */
+#define AT91_CF_BASE 0x50000000 /* NCS4-NCS6: Compact Flash physical base address */
+
/* Multi-Master Memory controller */
#define AT91_UHP_BASE 0x00300000 /* USB Host controller */
diff --git a/include/asm-arm/arch-ixp23xx/uncompress.h b/include/asm-arm/arch-ixp23xx/uncompress.h
index 62623fa9b2f7a..013575e6a9a1b 100644
--- a/include/asm-arm/arch-ixp23xx/uncompress.h
+++ b/include/asm-arm/arch-ixp23xx/uncompress.h
@@ -16,26 +16,21 @@
#define UART_BASE ((volatile u32 *)IXP23XX_UART1_PHYS)
-static __inline__ void putc(char c)
+static inline void putc(char c)
{
int j;
for (j = 0; j < 0x1000; j++) {
if (UART_BASE[UART_LSR] & UART_LSR_THRE)
break;
+ barrier();
}
UART_BASE[UART_TX] = c;
}
-static void putstr(const char *s)
+static inline void flush(void)
{
- while (*s) {
- putc(*s);
- if (*s == '\n')
- putc('\r');
- s++;
- }
}
#define arch_decomp_setup()
diff --git a/include/asm-arm/arch-pxa/pxa-regs.h b/include/asm-arm/arch-pxa/pxa-regs.h
index 1409c5bd703f0..c8f53a71c076b 100644
--- a/include/asm-arm/arch-pxa/pxa-regs.h
+++ b/include/asm-arm/arch-pxa/pxa-regs.h
@@ -485,7 +485,7 @@
#define SACR1_ENLBF (1 << 5) /* Enable Loopback */
#define SACR1_DRPL (1 << 4) /* Disable Replaying Function */
#define SACR1_DREC (1 << 3) /* Disable Recording Function */
-#define SACR1_AMSL (1 << 1) /* Specify Alternate Mode */
+#define SACR1_AMSL (1 << 0) /* Specify Alternate Mode */
#define SASR0_I2SOFF (1 << 7) /* Controller Status */
#define SASR0_ROR (1 << 6) /* Rx FIFO Overrun */
diff --git a/include/asm-arm/arch-pxa/sharpsl.h b/include/asm-arm/arch-pxa/sharpsl.h
index 0b43495d24b4c..94cb4982af82e 100644
--- a/include/asm-arm/arch-pxa/sharpsl.h
+++ b/include/asm-arm/arch-pxa/sharpsl.h
@@ -27,6 +27,8 @@ struct corgits_machinfo {
*/
struct corgibl_machinfo {
int max_intensity;
+ int default_intensity;
+ int limit_mask;
void (*set_bl_intensity)(int intensity);
};
extern void corgibl_limit_intensity(int limit);
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h
index 8f331bbd39a84..65ac305c2d457 100644
--- a/include/asm-arm/unistd.h
+++ b/include/asm-arm/unistd.h
@@ -308,8 +308,6 @@
#define __NR_mq_notify (__NR_SYSCALL_BASE+278)
#define __NR_mq_getsetattr (__NR_SYSCALL_BASE+279)
#define __NR_waitid (__NR_SYSCALL_BASE+280)
-
-#if defined(__ARM_EABI__) /* reserve these for un-muxing socketcall */
#define __NR_socket (__NR_SYSCALL_BASE+281)
#define __NR_bind (__NR_SYSCALL_BASE+282)
#define __NR_connect (__NR_SYSCALL_BASE+283)
@@ -327,9 +325,6 @@
#define __NR_getsockopt (__NR_SYSCALL_BASE+295)
#define __NR_sendmsg (__NR_SYSCALL_BASE+296)
#define __NR_recvmsg (__NR_SYSCALL_BASE+297)
-#endif
-
-#if defined(__ARM_EABI__) /* reserve these for un-muxing ipc */
#define __NR_semop (__NR_SYSCALL_BASE+298)
#define __NR_semget (__NR_SYSCALL_BASE+299)
#define __NR_semctl (__NR_SYSCALL_BASE+300)
@@ -341,16 +336,10 @@
#define __NR_shmdt (__NR_SYSCALL_BASE+306)
#define __NR_shmget (__NR_SYSCALL_BASE+307)
#define __NR_shmctl (__NR_SYSCALL_BASE+308)
-#endif
-
#define __NR_add_key (__NR_SYSCALL_BASE+309)
#define __NR_request_key (__NR_SYSCALL_BASE+310)
#define __NR_keyctl (__NR_SYSCALL_BASE+311)
-
-#if defined(__ARM_EABI__) /* reserved for un-muxing ipc */
#define __NR_semtimedop (__NR_SYSCALL_BASE+312)
-#endif
-
#define __NR_vserver (__NR_SYSCALL_BASE+313)
#define __NR_ioprio_set (__NR_SYSCALL_BASE+314)
#define __NR_ioprio_get (__NR_SYSCALL_BASE+315)
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index de4614840c2cd..9291c24f5819b 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -7,8 +7,15 @@
#include <asm/atomic.h>
#include <asm/types.h>
-/* An unsigned long type for operations which are atomic for a single
- * CPU. Usually used in combination with per-cpu variables. */
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic_long_t. Which is
+ * rather pointless. The whole point behind local_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local_t allows exploitation of such capabilities.
+ */
/* Implement in terms of atomics. */
@@ -20,7 +27,7 @@ typedef struct
#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-#define local_read(l) ((unsigned long)atomic_long_read(&(l)->a))
+#define local_read(l) atomic_long_read(&(l)->a)
#define local_set(l,i) atomic_long_set((&(l)->a),(i))
#define local_inc(l) atomic_long_inc(&(l)->a)
#define local_dec(l) atomic_long_dec(&(l)->a)
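
The generic local_t now reads back as a signed long, matching the i386 and x86_64 versions changed further down. The usual pattern is a per-CPU counter updated with the local_* operations from code pinned to one CPU; a hedged sketch follows (the counter name is hypothetical, the per-CPU accessors are those of this kernel series):

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, example_events);

/*
 * local_* ops are only atomic with respect to interrupts on the owning
 * CPU, so the update must happen with preemption disabled.
 */
static void count_event(void)
{
	local_inc(&get_cpu_var(example_events));	/* disables preemption */
	put_cpu_var(example_events);
}

static long read_events(int cpu)
{
	return local_read(&per_cpu(example_events, cpu));
}
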
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index 40c6d1f865984..29c6ac34e236f 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -17,13 +17,14 @@
* it wasn't 1 originally. This function MUST leave the value lower than
* 1 even when the "1" assertion wasn't true.
*/
-#define __mutex_fastpath_lock(count, fail_fn) \
-do { \
- if (unlikely(atomic_dec_return(count) < 0)) \
- fail_fn(count); \
- else \
- smp_mb(); \
-} while (0)
+static inline void
+__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ fail_fn(count);
+ else
+ smp_mb();
+}
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
@@ -36,7 +37,7 @@ do { \
* or anything the slow path function returns.
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
@@ -59,12 +60,13 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
* to return 0 otherwise.
*/
-#define __mutex_fastpath_unlock(count, fail_fn) \
-do { \
- smp_mb(); \
- if (unlikely(atomic_inc_return(count) <= 0)) \
- fail_fn(count); \
-} while (0)
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+ smp_mb();
+ if (unlikely(atomic_inc_return(count) <= 0))
+ fail_fn(count);
+}
#define __mutex_slowpath_needs_to_unlock() 1
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 1d24f47e6c482..32a2100c1aebf 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -3,7 +3,7 @@
*
* Generic implementation of the mutex fastpath, based on xchg().
*
- * NOTE: An xchg based implementation is less optimal than an atomic
+ * NOTE: An xchg based implementation might be less optimal than an atomic
* decrement/increment based implementation. If your architecture
* has a reasonable atomic dec/inc then you should probably use
* asm-generic/mutex-dec.h instead, or you could open-code an
@@ -22,14 +22,14 @@
* wasn't 1 originally. This function MUST leave the value lower than 1
* even when the "1" assertion wasn't true.
*/
-#define __mutex_fastpath_lock(count, fail_fn) \
-do { \
- if (unlikely(atomic_xchg(count, 0) != 1)) \
- fail_fn(count); \
- else \
- smp_mb(); \
-} while (0)
-
+static inline void
+__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_xchg(count, 0) != 1))
+ fail_fn(count);
+ else
+ smp_mb();
+}
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
@@ -42,7 +42,7 @@ do { \
* or anything the slow path function returns
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
return fail_fn(count);
@@ -64,12 +64,13 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
* to return 0 otherwise.
*/
-#define __mutex_fastpath_unlock(count, fail_fn) \
-do { \
- smp_mb(); \
- if (unlikely(atomic_xchg(count, 1) != 0)) \
- fail_fn(count); \
-} while (0)
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+ smp_mb();
+ if (unlikely(atomic_xchg(count, 1) != 0))
+ fail_fn(count);
+}
#define __mutex_slowpath_needs_to_unlock() 0
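
Converting the two fastpath helpers from macros to inline functions changes no behaviour; it lets the compiler type-check the count pointer and the fastcall slow-path pointer that callers pass in. A hedged sketch of how the helper is driven, modelled on kernel/mutex.c of this era (__mutex_lock_slowpath is the out-of-line contention handler there):

void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * Atomically decrement lock->count; only if it became negative
	 * is the slow path entered, where the task may sleep.  With the
	 * inline-function form, a slowpath with the wrong signature is
	 * now rejected at compile time instead of accepted by a macro.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}
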
diff --git a/include/asm-i386/apicdef.h b/include/asm-i386/apicdef.h
index 03185cef8e0a3..5e4a35af29217 100644
--- a/include/asm-i386/apicdef.h
+++ b/include/asm-i386/apicdef.h
@@ -37,6 +37,7 @@
#define APIC_SPIV_FOCUS_DISABLED (1<<9)
#define APIC_SPIV_APIC_ENABLED (1<<8)
#define APIC_ISR 0x100
+#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
#define APIC_TMR 0x180
#define APIC_IRR 0x200
#define APIC_ESR 0x280
diff --git a/include/asm-i386/floppy.h b/include/asm-i386/floppy.h
index 79727afb94c95..03403045c1829 100644
--- a/include/asm-i386/floppy.h
+++ b/include/asm-i386/floppy.h
@@ -56,7 +56,6 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
register unsigned char st;
#undef TRACE_FLPY_INT
-#define NO_FLOPPY_ASSEMBLER
#ifdef TRACE_FLPY_INT
static int calls=0;
@@ -71,38 +70,6 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
bytes = virtual_dma_count;
#endif
-#ifndef NO_FLOPPY_ASSEMBLER
- __asm__ (
- "testl %1,%1"
- "je 3f"
-"1: inb %w4,%b0"
- "andb $160,%b0"
- "cmpb $160,%b0"
- "jne 2f"
- "incw %w4"
- "testl %3,%3"
- "jne 4f"
- "inb %w4,%b0"
- "movb %0,(%2)"
- "jmp 5f"
-"4: movb (%2),%0"
- "outb %b0,%w4"
-"5: decw %w4"
- "outb %0,$0x80"
- "decl %1"
- "incl %2"
- "testl %1,%1"
- "jne 1b"
-"3: inb %w4,%b0"
-"2: "
- : "=a" ((char) st),
- "=c" ((long) virtual_dma_count),
- "=S" ((long) virtual_dma_addr)
- : "b" ((long) virtual_dma_mode),
- "d" ((short) virtual_dma_port+4),
- "1" ((long) virtual_dma_count),
- "2" ((long) virtual_dma_addr));
-#else
{
register int lcount;
register char *lptr;
@@ -122,7 +89,6 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
virtual_dma_addr = lptr;
st = inb(virtual_dma_port+4);
}
-#endif
#ifdef TRACE_FLPY_INT
calls++;
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index 0177da80dde34..e67fa08260fe8 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -5,7 +5,7 @@
typedef struct
{
- volatile unsigned long counter;
+ volatile long counter;
} local_t;
#define LOCAL_INIT(i) { (i) }
@@ -29,7 +29,7 @@ static __inline__ void local_dec(local_t *v)
:"m" (v->counter));
}
-static __inline__ void local_add(unsigned long i, local_t *v)
+static __inline__ void local_add(long i, local_t *v)
{
__asm__ __volatile__(
"addl %1,%0"
@@ -37,7 +37,7 @@ static __inline__ void local_add(unsigned long i, local_t *v)
:"ir" (i), "m" (v->counter));
}
-static __inline__ void local_sub(unsigned long i, local_t *v)
+static __inline__ void local_sub(long i, local_t *v)
{
__asm__ __volatile__(
"subl %1,%0"
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 789e9bdd0a406..2e7f3e257fddf 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -319,8 +319,9 @@
#define __NR_set_robust_list 311
#define __NR_get_robust_list 312
#define __NR_sys_splice 313
+#define __NR_sys_sync_file_range 314
-#define NR_syscalls 314
+#define NR_syscalls 315
/*
* user-visible error numbers are in the range -1 - -128: see
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 4e7e6f23b08c8..37e52a2836b03 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -68,6 +68,7 @@
#define PAL_SHUTDOWN 40 /* enter processor shutdown state */
#define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */
#define PAL_LOGICAL_TO_PHYSICAL 42 /* returns information on logical to physical processor mapping */
+#define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */
#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
@@ -130,7 +131,7 @@ typedef u64 pal_cache_line_state_t;
#define PAL_CACHE_LINE_STATE_MODIFIED 3 /* Modified */
typedef struct pal_freq_ratio {
- u64 den : 32, num : 32; /* numerator & denominator */
+ u32 den, num; /* numerator & denominator */
} itc_ratio, proc_ratio;
typedef union pal_cache_config_info_1_s {
@@ -151,10 +152,10 @@ typedef union pal_cache_config_info_1_s {
typedef union pal_cache_config_info_2_s {
struct {
- u64 cache_size : 32, /*cache size in bytes*/
+ u32 cache_size; /*cache size in bytes*/
- alias_boundary : 8, /* 39-32 aliased addr
+ u32 alias_boundary : 8, /* 39-32 aliased addr
* separation for max
* performance.
*/
@@ -1647,6 +1648,33 @@ ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping)
return iprv.status;
}
+
+typedef struct pal_cache_shared_info_s
+{
+ u64 num_shared;
+ pal_proc_n_log_info1_t ppli1;
+ pal_proc_n_log_info2_t ppli2;
+} pal_cache_shared_info_t;
+
+/* Get information on caches shared by a logical processor. */
+static inline s64
+ia64_pal_cache_shared_info(u64 level,
+ u64 type,
+ u64 proc_number,
+ pal_cache_shared_info_t *info)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL(iprv, PAL_CACHE_SHARED_INFO, level, type, proc_number);
+
+ if (iprv.status == PAL_STATUS_SUCCESS) {
+ info->num_shared = iprv.v0;
+ info->ppli1.ppli1_data = iprv.v1;
+ info->ppli2.ppli2_data = iprv.v2;
+ }
+
+ return iprv.status;
+}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_PAL_H */
diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index 5207758a6dd9d..868c7139dbfff 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -60,24 +60,10 @@ void __init pci_addr_cache_build(void);
* device (including config space i/o). Call eeh_add_device_late
* to finish the eeh setup for this device.
*/
-void eeh_add_device_early(struct device_node *);
-void eeh_add_device_late(struct pci_dev *dev);
void eeh_add_device_tree_early(struct device_node *);
void eeh_add_device_tree_late(struct pci_bus *);
/**
- * eeh_remove_device - undo EEH setup for the indicated pci device
- * @dev: pci device to be removed
- *
- * This routine should be called when a device is removed from
- * a running system (e.g. by hotplug or dlpar). It unregisters
- * the PCI device from the EEH subsystem. I/O errors affecting
- * this device will no longer be detected after this call; thus,
- * i/o errors affecting this slot may leave this device unusable.
- */
-void eeh_remove_device(struct pci_dev *);
-
-/**
* eeh_remove_device_recursive - undo EEH for device & children.
* @dev: pci device to be removed
*
@@ -116,12 +102,6 @@ static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *d
static inline void pci_addr_cache_build(void) { }
-static inline void eeh_add_device_early(struct device_node *dn) { }
-
-static inline void eeh_add_device_late(struct pci_dev *dev) { }
-
-static inline void eeh_remove_device(struct pci_dev *dev) { }
-
static inline void eeh_add_device_tree_early(struct device_node *dn) { }
static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h
index b72c04f3f5519..6cc7e1fb7bfd9 100644
--- a/include/asm-powerpc/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -4,47 +4,88 @@
#define HVSC .long 0x44000022
-#define H_Success 0
-#define H_Busy 1 /* Hardware busy -- retry later */
-#define H_Closed 2 /* Resource closed */
-#define H_Constrained 4 /* Resource request constrained to max allowed */
-#define H_InProgress 14 /* Kind of like busy */
-#define H_Pending 17 /* returned from H_POLL_PENDING */
-#define H_Continue 18 /* Returned from H_Join on success */
-#define H_LongBusyStartRange 9900 /* Start of long busy range */
-#define H_LongBusyOrder1msec 9900 /* Long busy, hint that 1msec is a good time to retry */
-#define H_LongBusyOrder10msec 9901 /* Long busy, hint that 10msec is a good time to retry */
-#define H_LongBusyOrder100msec 9902 /* Long busy, hint that 100msec is a good time to retry */
-#define H_LongBusyOrder1sec 9903 /* Long busy, hint that 1sec is a good time to retry */
-#define H_LongBusyOrder10sec 9904 /* Long busy, hint that 10sec is a good time to retry */
-#define H_LongBusyOrder100sec 9905 /* Long busy, hint that 100sec is a good time to retry */
-#define H_LongBusyEndRange 9905 /* End of long busy range */
-#define H_Hardware -1 /* Hardware error */
-#define H_Function -2 /* Function not supported */
-#define H_Privilege -3 /* Caller not privileged */
-#define H_Parameter -4 /* Parameter invalid, out-of-range or conflicting */
-#define H_Bad_Mode -5 /* Illegal msr value */
-#define H_PTEG_Full -6 /* PTEG is full */
-#define H_Not_Found -7 /* PTE was not found" */
-#define H_Reserved_DABR -8 /* DABR address is reserved by the hypervisor on this processor" */
-#define H_NoMem -9
-#define H_Authority -10
-#define H_Permission -11
-#define H_Dropped -12
-#define H_SourceParm -13
-#define H_DestParm -14
-#define H_RemoteParm -15
-#define H_Resource -16
+#define H_SUCCESS 0
+#define H_BUSY 1 /* Hardware busy -- retry later */
+#define H_CLOSED 2 /* Resource closed */
+#define H_NOT_AVAILABLE 3
+#define H_CONSTRAINED 4 /* Resource request constrained to max allowed */
+#define H_PARTIAL 5
+#define H_IN_PROGRESS 14 /* Kind of like busy */
+#define H_PAGE_REGISTERED 15
+#define H_PARTIAL_STORE 16
+#define H_PENDING 17 /* returned from H_POLL_PENDING */
+#define H_CONTINUE 18 /* Returned from H_Join on success */
+#define H_LONG_BUSY_START_RANGE 9900 /* Start of long busy range */
+#define H_LONG_BUSY_ORDER_1_MSEC 9900 /* Long busy, hint that 1msec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_10_MSEC 9901 /* Long busy, hint that 10msec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_100_MSEC 9902 /* Long busy, hint that 100msec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_1_SEC 9903 /* Long busy, hint that 1sec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_10_SEC 9904 /* Long busy, hint that 10sec \
+ is a good time to retry */
+#define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \
+ is a good time to retry */
+#define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */
+#define H_HARDWARE -1 /* Hardware error */
+#define H_FUNCTION -2 /* Function not supported */
+#define H_PRIVILEGE -3 /* Caller not privileged */
+#define H_PARAMETER -4 /* Parameter invalid, out-of-range or conflicting */
+#define H_BAD_MODE -5 /* Illegal msr value */
+#define H_PTEG_FULL -6 /* PTEG is full */
+#define H_NOT_FOUND -7 /* PTE was not found */
+#define H_RESERVED_DABR -8 /* DABR address is reserved by the hypervisor on this processor */
+#define H_NO_MEM -9
+#define H_AUTHORITY -10
+#define H_PERMISSION -11
+#define H_DROPPED -12
+#define H_SOURCE_PARM -13
+#define H_DEST_PARM -14
+#define H_REMOTE_PARM -15
+#define H_RESOURCE -16
+#define H_ADAPTER_PARM -17
+#define H_RH_PARM -18
+#define H_RCQ_PARM -19
+#define H_SCQ_PARM -20
+#define H_EQ_PARM -21
+#define H_RT_PARM -22
+#define H_ST_PARM -23
+#define H_SIGT_PARM -24
+#define H_TOKEN_PARM -25
+#define H_MLENGTH_PARM -27
+#define H_MEM_PARM -28
+#define H_MEM_ACCESS_PARM -29
+#define H_ATTR_PARM -30
+#define H_PORT_PARM -31
+#define H_MCG_PARM -32
+#define H_VL_PARM -33
+#define H_TSIZE_PARM -34
+#define H_TRACE_PARM -35
+
+#define H_MASK_PARM -37
+#define H_MCG_FULL -38
+#define H_ALIAS_EXIST -39
+#define H_P_COUNTER -40
+#define H_TABLE_FULL -41
+#define H_ALT_TABLE -42
+#define H_MR_CONDITION -43
+#define H_NOT_ENOUGH_RESOURCES -44
+#define H_R_STATE -45
+#define H_RESCINDEND -46
+
/* Long Busy is a condition that can be returned by the firmware
* when a call cannot be completed now, but the identical call
* should be retried later. This prevents calls blocking in the
- * firmware for long periods of time. Annoyingly the firmware can return
+ * firmware for long periods of time. Annoyingly the firmware can return
* a range of return codes, hinting at how long we should wait before
* retrying. If you don't care for the hint, the macro below is a good
* way to check for the long_busy return codes
*/
-#define H_isLongBusy(x) ((x >= H_LongBusyStartRange) && (x <= H_LongBusyEndRange))
+#define H_IS_LONG_BUSY(x) ((x >= H_LONG_BUSY_START_RANGE) \
+ && (x <= H_LONG_BUSY_END_RANGE))
/* Flags */
#define H_LARGE_PAGE (1UL<<(63-16))
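
The renamed long-busy codes are hints about how long to wait before retrying the identical hcall. A hedged sketch of the retry idiom using plpar_hcall_norets() (declared later in this header); the opcode and argument are illustrative, and a real caller would scale the delay to the specific H_LONG_BUSY_ORDER_* value rather than sleeping a fixed 10ms:

#include <linux/delay.h>

static long hcall_with_retry(unsigned long opcode, unsigned long arg)
{
	long rc;

	do {
		rc = plpar_hcall_norets(opcode, arg);
		if (H_IS_LONG_BUSY(rc))
			msleep(10);	/* firmware: retry the same call later */
	} while (H_IS_LONG_BUSY(rc));

	return rc;
}
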
@@ -66,6 +107,9 @@
#define H_DABRX_KERNEL (1UL<<(63-62))
#define H_DABRX_USER (1UL<<(63-63))
+/* Each control block has to be on a 4K boundary */
+#define H_CB_ALIGNMENT 4096
+
/* pSeries hypervisor opcodes */
#define H_REMOVE 0x04
#define H_ENTER 0x08
@@ -99,25 +143,52 @@
#define H_PERFMON 0x7c
#define H_MIGRATE_DMA 0x78
#define H_REGISTER_VPA 0xDC
-#define H_CEDE 0xE0
+#define H_CEDE 0xE0
#define H_CONFER 0xE4
-#define H_PROD 0xE8
+#define H_PROD 0xE8
#define H_GET_PPP 0xEC
#define H_SET_PPP 0xF0
#define H_PURR 0xF4
-#define H_PIC 0xF8
+#define H_PIC 0xF8
#define H_REG_CRQ 0xFC
#define H_FREE_CRQ 0x100
#define H_VIO_SIGNAL 0x104
#define H_SEND_CRQ 0x108
-#define H_COPY_RDMA 0x110
+#define H_COPY_RDMA 0x110
#define H_SET_XDABR 0x134
#define H_STUFF_TCE 0x138
#define H_PUT_TCE_INDIRECT 0x13C
#define H_VTERM_PARTNER_INFO 0x150
#define H_REGISTER_VTERM 0x154
#define H_FREE_VTERM 0x158
-#define H_POLL_PENDING 0x1D8
+#define H_RESET_EVENTS 0x15C
+#define H_ALLOC_RESOURCE 0x160
+#define H_FREE_RESOURCE 0x164
+#define H_MODIFY_QP 0x168
+#define H_QUERY_QP 0x16C
+#define H_REREGISTER_PMR 0x170
+#define H_REGISTER_SMR 0x174
+#define H_QUERY_MR 0x178
+#define H_QUERY_MW 0x17C
+#define H_QUERY_HCA 0x180
+#define H_QUERY_PORT 0x184
+#define H_MODIFY_PORT 0x188
+#define H_DEFINE_AQP1 0x18C
+#define H_GET_TRACE_BUFFER 0x190
+#define H_DEFINE_AQP0 0x194
+#define H_RESIZE_MR 0x198
+#define H_ATTACH_MCQP 0x19C
+#define H_DETACH_MCQP 0x1A0
+#define H_CREATE_RPT 0x1A4
+#define H_REMOVE_RPT 0x1A8
+#define H_REGISTER_RPAGES 0x1AC
+#define H_DISABLE_AND_GETC 0x1B0
+#define H_ERROR_DATA 0x1B4
+#define H_GET_HCA_INFO 0x1B8
+#define H_GET_PERF_COUNT 0x1BC
+#define H_MANAGE_TRACE 0x1C0
+#define H_QUERY_INT_STATE 0x1E4
+#define H_POLL_PENDING 0x1D8
#define H_JOIN 0x298
#define H_ENABLE_CRQ 0x2B0
@@ -152,7 +223,7 @@ long plpar_hcall_norets(unsigned long opcode, ...);
*/
long plpar_hcall_8arg_2ret(unsigned long opcode,
unsigned long arg1,
- unsigned long arg2,
+ unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5,
@@ -176,6 +247,42 @@ long plpar_hcall_4out(unsigned long opcode,
unsigned long *out3,
unsigned long *out4);
+long plpar_hcall_7arg_7ret(unsigned long opcode,
+ unsigned long arg1,
+ unsigned long arg2,
+ unsigned long arg3,
+ unsigned long arg4,
+ unsigned long arg5,
+ unsigned long arg6,
+ unsigned long arg7,
+ unsigned long *out1,
+ unsigned long *out2,
+ unsigned long *out3,
+ unsigned long *out4,
+ unsigned long *out5,
+ unsigned long *out6,
+ unsigned long *out7);
+
+long plpar_hcall_9arg_9ret(unsigned long opcode,
+ unsigned long arg1,
+ unsigned long arg2,
+ unsigned long arg3,
+ unsigned long arg4,
+ unsigned long arg5,
+ unsigned long arg6,
+ unsigned long arg7,
+ unsigned long arg8,
+ unsigned long arg9,
+ unsigned long *out1,
+ unsigned long *out2,
+ unsigned long *out3,
+ unsigned long *out4,
+ unsigned long *out5,
+ unsigned long *out6,
+ unsigned long *out7,
+ unsigned long *out8,
+ unsigned long *out9);
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 65f5a7b2646b1..d075725bf444b 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -365,8 +365,11 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
* powers of 2 writes until it reaches sufficient alignment).
*
* Based on this we disable the IP header alignment in network drivers.
+ * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
+ * cacheline alignment of buffers.
*/
-#define NET_IP_ALIGN 0
+#define NET_IP_ALIGN 0
+#define NET_SKB_PAD L1_CACHE_BYTES
#endif
#define arch_align_stack(x) (x)
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index e10ed87094f0c..436d216601e56 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -46,7 +46,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
- for_each_cpu(__i) \
+ for_each_possible_cpu(__i) \
memcpy((pcpudst)+__per_cpu_offset[__i], \
(src), (size)); \
} while (0)
diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h
index 64ec640a40eed..264f0ebeaedce 100644
--- a/include/asm-sparc/unistd.h
+++ b/include/asm-sparc/unistd.h
@@ -180,7 +180,7 @@
#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */
#define __NR_getdomainname 162 /* SunOS Specific */
#define __NR_setdomainname 163 /* Common */
-/* #define __NR_ni_syscall 164 ENOSYS under SunOS */
+/* #define __NR_utrap_install 164 Linux sparc64 specific */
#define __NR_quotactl 165 /* Common */
#define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */
#define __NR_mount 167 /* Common */
@@ -248,7 +248,7 @@
#define __NR_setfsgid 229 /* Linux Specific */
#define __NR__newselect 230 /* Linux Specific */
#define __NR_time 231 /* Linux Specific */
-/* #define __NR_oldstat 232 Linux Specific */
+#define __NR_sys_splice 232 /* Linux Specific */
#define __NR_stime 233 /* Linux Specific */
#define __NR_statfs64 234 /* Linux Specific */
#define __NR_fstatfs64 235 /* Linux Specific */
@@ -271,7 +271,7 @@
#define __NR_getsid 252
#define __NR_fdatasync 253
#define __NR_nfsservctl 254
-#define __NR_aplib 255
+#define __NR_sys_sync_file_range 255
#define __NR_clock_settime 256
#define __NR_clock_gettime 257
#define __NR_clock_getres 258
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index a284986b15414..d0544b4f47b75 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -250,7 +250,7 @@
#ifdef __KERNEL__
#define __NR_time 231 /* Linux sparc32 */
#endif
-/* #define __NR_oldstat 232 Linux Specific */
+#define __NR_sys_splice 232 /* Linux Specific */
#define __NR_stime 233 /* Linux Specific */
#define __NR_statfs64 234 /* Linux Specific */
#define __NR_fstatfs64 235 /* Linux Specific */
@@ -273,7 +273,7 @@
#define __NR_getsid 252
#define __NR_fdatasync 253
#define __NR_nfsservctl 254
-#define __NR_aplib 255
+#define __NR_sys_sync_file_range 255
#define __NR_clock_settime 256
#define __NR_clock_gettime 257
#define __NR_clock_getres 258
diff --git a/include/asm-um/desc.h b/include/asm-um/desc.h
index ac1d2a20d1781..4ec34a51b62c9 100644
--- a/include/asm-um/desc.h
+++ b/include/asm-um/desc.h
@@ -1,6 +1,16 @@
#ifndef __UM_DESC_H
#define __UM_DESC_H
-#include "asm/arch/desc.h"
+/* Taken from asm-i386/desc.h; it's the only thing we need. The rest
+ * wouldn't compile, and has never been used. */
+#define LDT_empty(info) (\
+ (info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0 )
#endif
diff --git a/include/asm-um/host_ldt-i386.h b/include/asm-um/host_ldt-i386.h
new file mode 100644
index 0000000000000..b27cb0a9dd30a
--- /dev/null
+++ b/include/asm-um/host_ldt-i386.h
@@ -0,0 +1,34 @@
+#ifndef __ASM_HOST_LDT_I386_H
+#define __ASM_HOST_LDT_I386_H
+
+#include "asm/arch/ldt.h"
+
+/*
+ * macros stolen from include/asm-i386/desc.h
+ */
+#define LDT_entry_a(info) \
+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+#define LDT_entry_b(info) \
+ (((info)->base_addr & 0xff000000) | \
+ (((info)->base_addr & 0x00ff0000) >> 16) | \
+ ((info)->limit & 0xf0000) | \
+ (((info)->read_exec_only ^ 1) << 9) | \
+ ((info)->contents << 10) | \
+ (((info)->seg_not_present ^ 1) << 15) | \
+ ((info)->seg_32bit << 22) | \
+ ((info)->limit_in_pages << 23) | \
+ ((info)->useable << 20) | \
+ 0x7000)
+
+#define LDT_empty(info) (\
+ (info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0 )
+
+#endif
diff --git a/include/asm-um/ldt-x86_64.h b/include/asm-um/host_ldt-x86_64.h
index 96b35aada79a4..74a63f7d9a90b 100644
--- a/include/asm-um/ldt-x86_64.h
+++ b/include/asm-um/host_ldt-x86_64.h
@@ -1,43 +1,8 @@
-/*
- * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
- * Licensed under the GPL
- *
- * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
- */
+#ifndef __ASM_HOST_LDT_X86_64_H
+#define __ASM_HOST_LDT_X86_64_H
-#ifndef __ASM_LDT_X86_64_H
-#define __ASM_LDT_X86_64_H
-
-#include "asm/semaphore.h"
#include "asm/arch/ldt.h"
-struct mmu_context_skas;
-extern void ldt_host_info(void);
-extern long init_new_ldt(struct mmu_context_skas * to_mm,
- struct mmu_context_skas * from_mm);
-extern void free_ldt(struct mmu_context_skas * mm);
-
-#define LDT_PAGES_MAX \
- ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
-#define LDT_ENTRIES_PER_PAGE \
- (PAGE_SIZE/LDT_ENTRY_SIZE)
-#define LDT_DIRECT_ENTRIES \
- ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE)
-
-struct ldt_entry {
- __u32 a;
- __u32 b;
-};
-
-typedef struct uml_ldt {
- int entry_count;
- struct semaphore semaphore;
- union {
- struct ldt_entry * pages[LDT_PAGES_MAX];
- struct ldt_entry entries[LDT_DIRECT_ENTRIES];
- } u;
-} uml_ldt_t;
-
/*
* macros stolen from include/asm-x86_64/desc.h
*/
diff --git a/include/asm-um/ldt-i386.h b/include/asm-um/ldt-i386.h
deleted file mode 100644
index 175722a911647..0000000000000
--- a/include/asm-um/ldt-i386.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
- * Licensed under the GPL
- *
- * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
- */
-
-#ifndef __ASM_LDT_I386_H
-#define __ASM_LDT_I386_H
-
-#include "asm/semaphore.h"
-#include "asm/arch/ldt.h"
-
-struct mmu_context_skas;
-extern void ldt_host_info(void);
-extern long init_new_ldt(struct mmu_context_skas * to_mm,
- struct mmu_context_skas * from_mm);
-extern void free_ldt(struct mmu_context_skas * mm);
-
-#define LDT_PAGES_MAX \
- ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
-#define LDT_ENTRIES_PER_PAGE \
- (PAGE_SIZE/LDT_ENTRY_SIZE)
-#define LDT_DIRECT_ENTRIES \
- ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE)
-
-struct ldt_entry {
- __u32 a;
- __u32 b;
-};
-
-typedef struct uml_ldt {
- int entry_count;
- struct semaphore semaphore;
- union {
- struct ldt_entry * pages[LDT_PAGES_MAX];
- struct ldt_entry entries[LDT_DIRECT_ENTRIES];
- } u;
-} uml_ldt_t;
-
-/*
- * macros stolen from include/asm-i386/desc.h
- */
-#define LDT_entry_a(info) \
- ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-
-#define LDT_entry_b(info) \
- (((info)->base_addr & 0xff000000) | \
- (((info)->base_addr & 0x00ff0000) >> 16) | \
- ((info)->limit & 0xf0000) | \
- (((info)->read_exec_only ^ 1) << 9) | \
- ((info)->contents << 10) | \
- (((info)->seg_not_present ^ 1) << 15) | \
- ((info)->seg_32bit << 22) | \
- ((info)->limit_in_pages << 23) | \
- ((info)->useable << 20) | \
- 0x7000)
-
-#define LDT_empty(info) (\
- (info)->base_addr == 0 && \
- (info)->limit == 0 && \
- (info)->contents == 0 && \
- (info)->read_exec_only == 1 && \
- (info)->seg_32bit == 0 && \
- (info)->limit_in_pages == 0 && \
- (info)->seg_not_present == 1 && \
- (info)->useable == 0 )
-
-#endif
diff --git a/include/asm-um/ldt.h b/include/asm-um/ldt.h
new file mode 100644
index 0000000000000..96f82a456ce65
--- /dev/null
+++ b/include/asm-um/ldt.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
+ * Licensed under the GPL
+ *
+ * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
+ */
+
+#ifndef __ASM_LDT_H
+#define __ASM_LDT_H
+
+#include "asm/semaphore.h"
+#include "asm/host_ldt.h"
+
+struct mmu_context_skas;
+extern void ldt_host_info(void);
+extern long init_new_ldt(struct mmu_context_skas * to_mm,
+ struct mmu_context_skas * from_mm);
+extern void free_ldt(struct mmu_context_skas * mm);
+
+#define LDT_PAGES_MAX \
+ ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
+#define LDT_ENTRIES_PER_PAGE \
+ (PAGE_SIZE/LDT_ENTRY_SIZE)
+#define LDT_DIRECT_ENTRIES \
+ ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE)
+
+struct ldt_entry {
+ __u32 a;
+ __u32 b;
+};
+
+typedef struct uml_ldt {
+ int entry_count;
+ struct semaphore semaphore;
+ union {
+ struct ldt_entry * pages[LDT_PAGES_MAX];
+ struct ldt_entry entries[LDT_DIRECT_ENTRIES];
+ } u;
+} uml_ldt_t;
+
+#endif
diff --git a/include/asm-um/processor-i386.h b/include/asm-um/processor-i386.h
index 4108a579eb92a..595f1c3e1e400 100644
--- a/include/asm-um/processor-i386.h
+++ b/include/asm-um/processor-i386.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
@@ -6,21 +6,48 @@
#ifndef __UM_PROCESSOR_I386_H
#define __UM_PROCESSOR_I386_H
+#include "linux/string.h"
+#include "asm/host_ldt.h"
+#include "asm/segment.h"
+
extern int host_has_xmm;
extern int host_has_cmov;
/* include faultinfo structure */
#include "sysdep/faultinfo.h"
+struct uml_tls_struct {
+ struct user_desc tls;
+ unsigned flushed:1;
+ unsigned present:1;
+};
+
struct arch_thread {
+ struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
unsigned long debugregs[8];
int debugregs_seq;
struct faultinfo faultinfo;
};
-#define INIT_ARCH_THREAD { .debugregs = { [ 0 ... 7 ] = 0 }, \
- .debugregs_seq = 0, \
- .faultinfo = { 0, 0, 0 } }
+#define INIT_ARCH_THREAD { \
+ .tls_array = { [ 0 ... GDT_ENTRY_TLS_ENTRIES - 1 ] = \
+ { .present = 0, .flushed = 0 } }, \
+ .debugregs = { [ 0 ... 7 ] = 0 }, \
+ .debugregs_seq = 0, \
+ .faultinfo = { 0, 0, 0 } \
+}
+
+static inline void arch_flush_thread(struct arch_thread *thread)
+{
+ /* Clear any TLS still hanging */
+ memset(&thread->tls_array, 0, sizeof(thread->tls_array));
+}
+
+static inline void arch_copy_thread(struct arch_thread *from,
+ struct arch_thread *to)
+{
+ memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array));
+}
#include "asm/arch/user.h"
diff --git a/include/asm-um/processor-x86_64.h b/include/asm-um/processor-x86_64.h
index e1e1255a1d365..10609af376c03 100644
--- a/include/asm-um/processor-x86_64.h
+++ b/include/asm-um/processor-x86_64.h
@@ -28,6 +28,15 @@ extern inline void rep_nop(void)
.debugregs_seq = 0, \
.faultinfo = { 0, 0, 0 } }
+static inline void arch_flush_thread(struct arch_thread *thread)
+{
+}
+
+static inline void arch_copy_thread(struct arch_thread *from,
+ struct arch_thread *to)
+{
+}
+
#include "asm/arch/user.h"
#define current_text_addr() \
diff --git a/include/asm-um/ptrace-generic.h b/include/asm-um/ptrace-generic.h
index 46599ac440372..503484305e677 100644
--- a/include/asm-um/ptrace-generic.h
+++ b/include/asm-um/ptrace-generic.h
@@ -28,7 +28,7 @@ struct pt_regs {
union uml_pt_regs regs;
};
-#define EMPTY_REGS { regs : EMPTY_UML_PT_REGS }
+#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
@@ -60,17 +60,9 @@ extern void show_regs(struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, union uml_pt_regs *regs,
int error_code);
-#endif
+extern int arch_copy_tls(struct task_struct *new);
+extern void clear_flushed_tls(struct task_struct *task);
#endif
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+#endif
diff --git a/include/asm-um/ptrace-i386.h b/include/asm-um/ptrace-i386.h
index fe882b9d917eb..30656c962d742 100644
--- a/include/asm-um/ptrace-i386.h
+++ b/include/asm-um/ptrace-i386.h
@@ -8,8 +8,11 @@
#define HOST_AUDIT_ARCH AUDIT_ARCH_I386
+#include "linux/compiler.h"
#include "sysdep/ptrace.h"
#include "asm/ptrace-generic.h"
+#include "asm/host_ldt.h"
+#include "choose-mode.h"
#define PT_REGS_EAX(r) UPT_EAX(&(r)->regs)
#define PT_REGS_EBX(r) UPT_EBX(&(r)->regs)
@@ -38,15 +41,31 @@
#define user_mode(r) UPT_IS_USER(&(r)->regs)
-#endif
+extern int ptrace_get_thread_area(struct task_struct *child, int idx,
+ struct user_desc __user *user_desc);
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+extern int ptrace_set_thread_area(struct task_struct *child, int idx,
+ struct user_desc __user *user_desc);
+
+extern int do_set_thread_area_skas(struct user_desc *info);
+extern int do_get_thread_area_skas(struct user_desc *info);
+
+extern int do_set_thread_area_tt(struct user_desc *info);
+extern int do_get_thread_area_tt(struct user_desc *info);
+
+extern int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to);
+extern int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to);
+
+static inline int do_get_thread_area(struct user_desc *info)
+{
+ return CHOOSE_MODE_PROC(do_get_thread_area_tt, do_get_thread_area_skas, info);
+}
+
+static inline int do_set_thread_area(struct user_desc *info)
+{
+ return CHOOSE_MODE_PROC(do_set_thread_area_tt, do_set_thread_area_skas, info);
+}
+
+struct task_struct;
+
+#endif
diff --git a/include/asm-um/ptrace-x86_64.h b/include/asm-um/ptrace-x86_64.h
index be51219a8ffe4..c894e68b1f969 100644
--- a/include/asm-um/ptrace-x86_64.h
+++ b/include/asm-um/ptrace-x86_64.h
@@ -8,6 +8,8 @@
#define __UM_PTRACE_X86_64_H
#include "linux/compiler.h"
+#include "asm/errno.h"
+#include "asm/host_ldt.h"
#define signal_fault signal_fault_x86_64
#define __FRAME_OFFSETS /* Needed to get the R* macros */
@@ -63,15 +65,26 @@ void signal_fault(struct pt_regs_subarch *regs, void *frame, char *where);
#define profile_pc(regs) PT_REGS_IP(regs)
-#endif
+static inline int ptrace_get_thread_area(struct task_struct *child, int idx,
+ struct user_desc __user *user_desc)
+{
+ return -ENOSYS;
+}
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+static inline int ptrace_set_thread_area(struct task_struct *child, int idx,
+ struct user_desc __user *user_desc)
+{
+ return -ENOSYS;
+}
+
+static inline void arch_switch_to_tt(struct task_struct *from,
+ struct task_struct *to)
+{
+}
+
+static inline void arch_switch_to_skas(struct task_struct *from,
+ struct task_struct *to)
+{
+}
+
+#endif
diff --git a/include/asm-um/segment.h b/include/asm-um/segment.h
index 55e40301f6251..45183fcd10b6c 100644
--- a/include/asm-um/segment.h
+++ b/include/asm-um/segment.h
@@ -1,4 +1,10 @@
#ifndef __UM_SEGMENT_H
#define __UM_SEGMENT_H
+extern int host_gdt_entry_tls_min;
+
+#define GDT_ENTRY_TLS_ENTRIES 3
+#define GDT_ENTRY_TLS_MIN host_gdt_entry_tls_min
+#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
#endif
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index 17b6b07c4332f..f166b9837c6a5 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -27,14 +27,14 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
- task: &tsk, \
- exec_domain: &default_exec_domain, \
- flags: 0, \
- cpu: 0, \
- preempt_count: 1, \
- addr_limit: KERNEL_DS, \
- restart_block: { \
- fn: do_no_restart_syscall, \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = 1, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
}, \
}
diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h
index 4e460d6f5ac82..bea5a015f6674 100644
--- a/include/asm-um/uaccess.h
+++ b/include/asm-um/uaccess.h
@@ -57,7 +57,7 @@
({ \
const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
(access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
- __get_user(x, private_ptr) : ((x) = 0, -EFAULT)); \
+ __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
})
#define __put_user(x, ptr) \
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index bf148037d4e54..cd17945bf2181 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -5,7 +5,7 @@
typedef struct
{
- volatile unsigned long counter;
+ volatile long counter;
} local_t;
#define LOCAL_INIT(i) { (i) }
@@ -13,7 +13,7 @@ typedef struct
#define local_read(v) ((v)->counter)
#define local_set(v,i) (((v)->counter) = (i))
-static __inline__ void local_inc(local_t *v)
+static inline void local_inc(local_t *v)
{
__asm__ __volatile__(
"incq %0"
@@ -21,7 +21,7 @@ static __inline__ void local_inc(local_t *v)
:"m" (v->counter));
}
-static __inline__ void local_dec(local_t *v)
+static inline void local_dec(local_t *v)
{
__asm__ __volatile__(
"decq %0"
@@ -29,7 +29,7 @@ static __inline__ void local_dec(local_t *v)
:"m" (v->counter));
}
-static __inline__ void local_add(unsigned int i, local_t *v)
+static inline void local_add(long i, local_t *v)
{
__asm__ __volatile__(
"addq %1,%0"
@@ -37,7 +37,7 @@ static __inline__ void local_add(unsigned int i, local_t *v)
:"ir" (i), "m" (v->counter));
}
-static __inline__ void local_sub(unsigned int i, local_t *v)
+static inline void local_sub(long i, local_t *v)
{
__asm__ __volatile__(
"subq %1,%0"
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index bb9e543223225..75e91f5b6a04c 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -19,20 +19,25 @@ struct fb_info;
struct backlight_properties {
/* Owner module */
struct module *owner;
- /* Get the backlight power status (0: full on, 1..3: power saving
- modes; 4: full off), see FB_BLANK_XXX */
- int (*get_power)(struct backlight_device *);
- /* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */
- int (*set_power)(struct backlight_device *, int power);
- /* Maximal value for brightness (read-only) */
- int max_brightness;
- /* Get current backlight brightness */
+
+ /* Notify the backlight driver some property has changed */
+ int (*update_status)(struct backlight_device *);
+ /* Return the current backlight brightness (accounting for power,
+ fb_blank etc.) */
int (*get_brightness)(struct backlight_device *);
- /* Set backlight brightness (0..max_brightness) */
- int (*set_brightness)(struct backlight_device *, int brightness);
/* Check if given framebuffer device is the one bound to this backlight;
return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
int (*check_fb)(struct fb_info *);
+
+ /* Current user-requested brightness (0 - max_brightness) */
+ int brightness;
+ /* Maximal value for brightness (read-only) */
+ int max_brightness;
+ /* Current FB Power mode (0: full on, 1..3: power saving
+ modes; 4: full off), see FB_BLANK_XXX */
+ int power;
+ /* FB Blanking active? (values as for power) */
+ int fb_blank;
};
struct backlight_device {
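
With the reworked backlight_properties, a driver no longer receives set_power()/set_brightness() calls; it gets a single update_status() callback and is expected to combine the brightness, power and fb_blank fields itself. A hedged sketch of what such a callback might look like; mydrv_set_hw_brightness() and mydrv_bl_get_brightness() are hypothetical driver helpers, and bd->props is assumed to point at the registered properties:

static int mydrv_bl_update_status(struct backlight_device *bd)
{
	int level = bd->props->brightness;

	/* any blanked or power-saving state turns the backlight off */
	if (bd->props->power != FB_BLANK_UNBLANK ||
	    bd->props->fb_blank != FB_BLANK_UNBLANK)
		level = 0;

	mydrv_set_hw_brightness(level);		/* hypothetical */
	return 0;
}

static struct backlight_properties mydrv_bl_props = {
	.owner		= THIS_MODULE,
	.update_status	= mydrv_bl_update_status,
	.get_brightness	= mydrv_bl_get_brightness,	/* hypothetical */
	.max_brightness	= 255,
};
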
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d10bd30c337e2..836325ee0931e 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -275,6 +275,7 @@ extern void d_move(struct dentry *, struct dentry *);
/* appendix may either be NULL or be used for transname suffixes */
extern struct dentry * d_lookup(struct dentry *, struct qstr *);
extern struct dentry * __d_lookup(struct dentry *, struct qstr *);
+extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *);
/* validate "insecure" dentry pointer */
extern int d_validate(struct dentry *, struct dentry *);
diff --git a/include/linux/fadvise.h b/include/linux/fadvise.h
index b2913bba35d87..e8e747139b9a1 100644
--- a/include/linux/fadvise.h
+++ b/include/linux/fadvise.h
@@ -18,10 +18,4 @@
#define POSIX_FADV_NOREUSE 5 /* Data will be accessed once. */
#endif
-/*
- * Linux-specific fadvise() extensions:
- */
-#define LINUX_FADV_ASYNC_WRITE 32 /* Start writeout on range */
-#define LINUX_FADV_WRITE_WAIT 33 /* Wait upon writeout to range */
-
#endif /* FADVISE_H_INCLUDED */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index d03fadfcafe37..315d89740ddff 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -839,12 +839,10 @@ struct fb_info {
#define FB_LEFT_POS(bpp) (32 - bpp)
#define FB_SHIFT_HIGH(val, bits) ((val) >> (bits))
#define FB_SHIFT_LOW(val, bits) ((val) << (bits))
-#define FB_BIT_NR(b) (7 - (b))
#else
#define FB_LEFT_POS(bpp) (0)
#define FB_SHIFT_HIGH(val, bits) ((val) << (bits))
#define FB_SHIFT_LOW(val, bits) ((val) >> (bits))
-#define FB_BIT_NR(b) (b)
#endif
/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 20fa5f6d7269f..1e9ebaba07b7a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -757,6 +757,13 @@ extern void send_sigio(struct fown_struct *fown, int fd, int band);
extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
extern int fcntl_getlease(struct file *filp);
+/* fs/sync.c */
+#define SYNC_FILE_RANGE_WAIT_BEFORE 1
+#define SYNC_FILE_RANGE_WRITE 2
+#define SYNC_FILE_RANGE_WAIT_AFTER 4
+extern int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte,
+ int flags);
+
/* fs/locks.c */
extern void locks_init_lock(struct file_lock *);
extern void locks_copy_lock(struct file_lock *, struct file_lock *);
@@ -857,7 +864,7 @@ struct super_block {
*/
struct mutex s_vfs_rename_mutex; /* Kludge */
- /* Granuality of c/m/atime in ns.
+ /* Granularity of c/m/atime in ns.
Cannot be worse than a second */
u32 s_time_gran;
};
@@ -1413,6 +1420,7 @@ extern void bd_release_from_disk(struct block_device *, struct gendisk *);
#endif
/* fs/char_dev.c */
+#define CHRDEV_MAJOR_HASH_SIZE 255
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
extern int register_chrdev(unsigned int, const char *,
@@ -1420,25 +1428,17 @@ extern int register_chrdev(unsigned int, const char *,
extern int unregister_chrdev(unsigned int, const char *);
extern void unregister_chrdev_region(dev_t, unsigned);
extern int chrdev_open(struct inode *, struct file *);
-extern int get_chrdev_list(char *);
-extern void *acquire_chrdev_list(void);
-extern int count_chrdev_list(void);
-extern void *get_next_chrdev(void *);
-extern int get_chrdev_info(void *, int *, char **);
-extern void release_chrdev_list(void *);
+extern void chrdev_show(struct seq_file *,off_t);
/* fs/block_dev.c */
+#define BLKDEV_MAJOR_HASH_SIZE 255
#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
extern const char *__bdevname(dev_t, char *buffer);
extern const char *bdevname(struct block_device *bdev, char *buffer);
extern struct block_device *lookup_bdev(const char *);
extern struct block_device *open_bdev_excl(const char *, int, void *);
extern void close_bdev_excl(struct block_device *);
-extern void *acquire_blkdev_list(void);
-extern int count_blkdev_list(void);
-extern void *get_next_blkdev(void *);
-extern int get_blkdev_info(void *, int *, char **);
-extern void release_blkdev_list(void *);
+extern void blkdev_show(struct seq_file *,off_t);
extern void init_special_inode(struct inode *, umode_t, dev_t);
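
The three SYNC_FILE_RANGE_* flags are designed to be ORed together; passing all three gives write-and-wait semantics over the byte range. A minimal in-kernel sketch that simply forwards to the new do_sync_file_range(); my_flush_range() and its arguments are illustrative:

static int my_flush_range(struct file *filp, loff_t offset, loff_t endbyte)
{
	/* wait for in-flight writeback, start new writeout, wait for it too */
	return do_sync_file_range(filp, offset, endbyte,
				  SYNC_FILE_RANGE_WAIT_BEFORE |
				  SYNC_FILE_RANGE_WRITE |
				  SYNC_FILE_RANGE_WAIT_AFTER);
}
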
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 9c8e6da2393bb..71e7b2847cb30 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -11,6 +11,7 @@
#include <asm/io.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/timer.h>
@@ -40,7 +41,7 @@ struct gameport {
struct gameport *parent, *child;
struct gameport_driver *drv;
- struct semaphore drv_sem; /* protects serio->drv so attributes can pin driver */
+ struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */
struct device dev;
unsigned int registered; /* port has been fully registered with driver core */
@@ -137,12 +138,12 @@ static inline void gameport_set_drvdata(struct gameport *gameport, void *data)
*/
static inline int gameport_pin_driver(struct gameport *gameport)
{
- return down_interruptible(&gameport->drv_sem);
+ return mutex_lock_interruptible(&gameport->drv_mutex);
}
static inline void gameport_unpin_driver(struct gameport *gameport)
{
- up(&gameport->drv_sem);
+ mutex_unlock(&gameport->drv_mutex);
}
void __gameport_register_driver(struct gameport_driver *drv, struct module *owner);
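
gameport_pin_driver() now sleeps on a mutex instead of a semaphore, but the calling pattern is unchanged: pin, dereference gameport->drv, unpin. A hedged sketch of an attribute-style reader; the function name and the use of the driver's description field are illustrative:

static ssize_t my_show_gameport_driver(struct gameport *gameport, char *buf)
{
	ssize_t len;
	int error;

	error = gameport_pin_driver(gameport);
	if (error)
		return error;	/* interrupted while waiting for drv_mutex */

	len = sprintf(buf, "%s\n",
		      gameport->drv ? gameport->drv->description : "(none)");
	gameport_unpin_driver(gameport);
	return len;
}
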
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 93830158348e7..306acf1dc6d58 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -58,6 +58,19 @@ struct hrtimer {
};
/**
+ * struct hrtimer_sleeper - simple sleeper structure
+ *
+ * @timer: embedded timer structure
+ * @task: task to wake up
+ *
+ * task is set to NULL, when the timer expires.
+ */
+struct hrtimer_sleeper {
+ struct hrtimer timer;
+ struct task_struct *task;
+};
+
+/**
* struct hrtimer_base - the timer base for a specific clock
*
* @index: clock type index for per_cpu support when moving a timer
@@ -67,7 +80,7 @@ struct hrtimer {
* @first: pointer to the timer node which expires first
* @resolution: the resolution of the clock, in nanoseconds
* @get_time: function to retrieve the current time of the clock
- * @get_sofirq_time: function to retrieve the current time from the softirq
+ * @get_softirq_time: function to retrieve the current time from the softirq
* @curr_timer: the timer which is executing a callback right now
* @softirq_time: the time when running the hrtimer queue in the softirq
*/
@@ -127,6 +140,9 @@ extern long hrtimer_nanosleep(struct timespec *rqtp,
const enum hrtimer_mode mode,
const clockid_t clockid);
+extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
+ struct task_struct *tsk);
+
/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);
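
struct hrtimer_sleeper couples a timer with the task to wake, and hrtimer_init_sleeper() installs a wakeup callback that clears ->task when the timer fires. A hedged sketch of the usual sleep-until pattern built only from the declarations visible here; handling of early wakeups is simplified:

static int my_sleep_until(ktime_t expires)
{
	struct hrtimer_sleeper t;

	hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_ABS);
	hrtimer_init_sleeper(&t, current);

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_start(&t.timer, expires, HRTIMER_ABS);
	if (t.task)
		schedule();
	hrtimer_cancel(&t.timer);
	__set_current_state(TASK_RUNNING);

	/* the wakeup callback sets t.task to NULL once the timer expired */
	return t.task ? -EINTR : 0;
}
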
diff --git a/include/linux/input.h b/include/linux/input.h
index 1d4e341b72e62..b0e612dda0cf2 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -421,7 +421,7 @@ struct input_absinfo {
#define BTN_GEAR_UP 0x151
#define KEY_OK 0x160
-#define KEY_SELECT 0x161
+#define KEY_SELECT 0x161
#define KEY_GOTO 0x162
#define KEY_CLEAR 0x163
#define KEY_POWER2 0x164
@@ -512,6 +512,15 @@ struct input_absinfo {
#define KEY_FN_S 0x1e3
#define KEY_FN_B 0x1e4
+#define KEY_BRL_DOT1 0x1f1
+#define KEY_BRL_DOT2 0x1f2
+#define KEY_BRL_DOT3 0x1f3
+#define KEY_BRL_DOT4 0x1f4
+#define KEY_BRL_DOT5 0x1f5
+#define KEY_BRL_DOT6 0x1f6
+#define KEY_BRL_DOT7 0x1f7
+#define KEY_BRL_DOT8 0x1f8
+
/* We avoid low common keys in module aliases so they don't get huge. */
#define KEY_MIN_INTERESTING KEY_MUTE
#define KEY_MAX 0x1ff
@@ -929,7 +938,7 @@ struct input_dev {
struct input_handle *grab;
- struct semaphore sem; /* serializes open and close operations */
+ struct mutex mutex; /* serializes open and close operations */
unsigned int users;
struct class_device cdev;
@@ -995,11 +1004,6 @@ static inline void init_input_dev(struct input_dev *dev)
struct input_dev *input_allocate_device(void);
-static inline void input_free_device(struct input_dev *dev)
-{
- kfree(dev);
-}
-
static inline struct input_dev *input_get_device(struct input_dev *dev)
{
return to_input_dev(class_device_get(&dev->cdev));
@@ -1010,6 +1014,11 @@ static inline void input_put_device(struct input_dev *dev)
class_device_put(&dev->cdev);
}
+static inline void input_free_device(struct input_dev *dev)
+{
+ input_put_device(dev);
+}
+
int input_register_device(struct input_dev *);
void input_unregister_device(struct input_dev *);
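
input_free_device() is now a put of the embedded class device rather than a kfree(), so the allocate/register/free-on-error pattern stays reference-count safe. A short sketch of that pattern; the device name and key choice are arbitrary:

static int my_input_setup(void)
{
	struct input_dev *dev;
	int err;

	dev = input_allocate_device();
	if (!dev)
		return -ENOMEM;

	dev->name = "my hypothetical button";
	set_bit(EV_KEY, dev->evbit);
	set_bit(KEY_OK, dev->keybit);

	err = input_register_device(dev);
	if (err) {
		input_free_device(dev);	/* drops the allocation reference */
		return err;
	}
	return 0;
}
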
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 53571288a9fc0..6d9c7e4da4720 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -82,6 +82,13 @@ struct ipmi_smi_handlers
{
struct module *owner;
+ /* The low-level interface cannot start sending messages to
+ the upper layer until this function is called. This may
+ not be NULL; the lower layer must take the interface from
+ this call. */
+ int (*start_processing)(void *send_info,
+ ipmi_smi_t new_intf);
+
/* Called to enqueue an SMI message to be sent. This
operation is not allowed to fail. If an error occurs, it
should report back the error in a received message. It may
@@ -157,13 +164,16 @@ static inline void ipmi_demangle_device_id(unsigned char *data,
}
/* Add a low-level interface to the IPMI driver. Note that if the
- interface doesn't know its slave address, it should pass in zero. */
+ interface doesn't know its slave address, it should pass in zero.
+ The low-level interface should not deliver any messages to the
+ upper layer until the start_processing() function in the handlers
+ is called, and the lower layer must get the interface from that
+ call. */
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *dev,
- unsigned char slave_addr,
- ipmi_smi_t *intf);
+ unsigned char slave_addr);
/*
* Remove a low-level interface from the IPMI driver. This will
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index e87c32a5c86a0..4eb851ece080d 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -135,6 +135,8 @@ static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag)
#define U(x) ((x) ^ 0xf000)
+#define BRL_UC_ROW 0x2800
+
/* keyboard.c */
struct console;
diff --git a/include/linux/keyboard.h b/include/linux/keyboard.h
index 08488042d74a7..de76843bbe8a8 100644
--- a/include/linux/keyboard.h
+++ b/include/linux/keyboard.h
@@ -44,6 +44,7 @@ extern unsigned short plain_map[NR_KEYS];
#define KT_ASCII 9
#define KT_LOCK 10
#define KT_SLOCK 12
+#define KT_BRL 14
#define K(t,v) (((t)<<8)|(v))
#define KTYP(x) ((x) >> 8)
@@ -427,5 +428,17 @@ extern unsigned short plain_map[NR_KEYS];
#define NR_LOCK 8
+#define K_BRL_BLANK K(KT_BRL, 0)
+#define K_BRL_DOT1 K(KT_BRL, 1)
+#define K_BRL_DOT2 K(KT_BRL, 2)
+#define K_BRL_DOT3 K(KT_BRL, 3)
+#define K_BRL_DOT4 K(KT_BRL, 4)
+#define K_BRL_DOT5 K(KT_BRL, 5)
+#define K_BRL_DOT6 K(KT_BRL, 6)
+#define K_BRL_DOT7 K(KT_BRL, 7)
+#define K_BRL_DOT8 K(KT_BRL, 8)
+
+#define NR_BRL 9
+
#define MAX_DIACR 256
#endif
diff --git a/include/linux/leds.h b/include/linux/leds.h
new file mode 100644
index 0000000000000..4617e75903b00
--- /dev/null
+++ b/include/linux/leds.h
@@ -0,0 +1,111 @@
+/*
+ * Driver model for leds and led triggers
+ *
+ * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu>
+ * Copyright (C) 2005 Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __LINUX_LEDS_H_INCLUDED
+#define __LINUX_LEDS_H_INCLUDED
+
+struct device;
+struct class_device;
+/*
+ * LED Core
+ */
+
+enum led_brightness {
+ LED_OFF = 0,
+ LED_HALF = 127,
+ LED_FULL = 255,
+};
+
+struct led_classdev {
+ const char *name;
+ int brightness;
+ int flags;
+#define LED_SUSPENDED (1 << 0)
+
+ /* A function to set the brightness of the led */
+ void (*brightness_set)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
+
+ struct class_device *class_dev;
+ /* LED Device linked list */
+ struct list_head node;
+
+ /* Trigger data */
+ char *default_trigger;
+#ifdef CONFIG_LEDS_TRIGGERS
+ rwlock_t trigger_lock;
+ /* Protects the trigger data below */
+
+ struct led_trigger *trigger;
+ struct list_head trig_list;
+ void *trigger_data;
+#endif
+};
+
+extern int led_classdev_register(struct device *parent,
+ struct led_classdev *led_cdev);
+extern void led_classdev_unregister(struct led_classdev *led_cdev);
+extern void led_classdev_suspend(struct led_classdev *led_cdev);
+extern void led_classdev_resume(struct led_classdev *led_cdev);
+
+/*
+ * LED Triggers
+ */
+#ifdef CONFIG_LEDS_TRIGGERS
+
+#define TRIG_NAME_MAX 50
+
+struct led_trigger {
+ /* Trigger Properties */
+ const char *name;
+ void (*activate)(struct led_classdev *led_cdev);
+ void (*deactivate)(struct led_classdev *led_cdev);
+
+ /* LEDs under control by this trigger (for simple triggers) */
+ rwlock_t leddev_list_lock;
+ struct list_head led_cdevs;
+
+ /* Link to next registered trigger */
+ struct list_head next_trig;
+};
+
+/* Registration functions for complex triggers */
+extern int led_trigger_register(struct led_trigger *trigger);
+extern void led_trigger_unregister(struct led_trigger *trigger);
+
+/* Registration functions for simple triggers */
+#define DEFINE_LED_TRIGGER(x) static struct led_trigger *x;
+#define DEFINE_LED_TRIGGER_GLOBAL(x) struct led_trigger *x;
+extern void led_trigger_register_simple(const char *name,
+ struct led_trigger **trigger);
+extern void led_trigger_unregister_simple(struct led_trigger *trigger);
+extern void led_trigger_event(struct led_trigger *trigger,
+ enum led_brightness event);
+
+#else
+
+/* Triggers aren't active - null macros */
+#define DEFINE_LED_TRIGGER(x)
+#define DEFINE_LED_TRIGGER_GLOBAL(x)
+#define led_trigger_register_simple(x, y) do {} while(0)
+#define led_trigger_unregister_simple(x) do {} while(0)
+#define led_trigger_event(x, y) do {} while(0)
+
+#endif
+
+/* Trigger specific functions */
+#ifdef CONFIG_LEDS_TRIGGER_IDE_DISK
+extern void ledtrig_ide_activity(void);
+#else
+#define ledtrig_ide_activity() do {} while(0)
+#endif
+
+#endif /* __LINUX_LEDS_H_INCLUDED */
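
A LED driver only has to fill in a led_classdev with a brightness_set() hook and register it; triggers such as "ide-disk" can then drive it. A minimal hedged sketch; my_set_gpio() and my_parent_device stand in for real hardware plumbing:

#include <linux/module.h>
#include <linux/leds.h>

static void mydrv_led_set(struct led_classdev *led_cdev,
			  enum led_brightness value)
{
	my_set_gpio(value != LED_OFF);		/* hypothetical */
}

static struct led_classdev mydrv_led = {
	.name			= "mydrv:green",
	.default_trigger	= "ide-disk",
	.brightness_set		= mydrv_led_set,
};

static int __init mydrv_led_init(void)
{
	return led_classdev_register(my_parent_device, &mydrv_led);
}

static void __exit mydrv_led_exit(void)
{
	led_classdev_unregister(&mydrv_led);
}

module_init(mydrv_led_init);
module_exit(mydrv_led_exit);
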
diff --git a/include/linux/libps2.h b/include/linux/libps2.h
index a710bddda4eba..08a450a9dbf7d 100644
--- a/include/linux/libps2.h
+++ b/include/linux/libps2.h
@@ -28,7 +28,7 @@ struct ps2dev {
struct serio *serio;
/* Ensures that only one command is executing at a time */
- struct semaphore cmd_sem;
+ struct mutex cmd_mutex;
/* Used to signal completion from interrupt handler */
wait_queue_head_t wait;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 7d09962c3c0bb..ff0a64073ebcc 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -12,7 +12,7 @@ extern void migrate_page_copy(struct page *, struct page *);
extern int migrate_page_remove_references(struct page *, struct page *, int);
extern int migrate_pages(struct list_head *l, struct list_head *t,
struct list_head *moved, struct list_head *failed);
-int migrate_pages_to(struct list_head *pagelist,
+extern int migrate_pages_to(struct list_head *pagelist,
struct vm_area_struct *vma, int dest);
extern int fail_migrate_page(struct page *, struct page *);
@@ -26,6 +26,9 @@ static inline int putback_lru_pages(struct list_head *l) { return 0; }
static inline int migrate_pages(struct list_head *l, struct list_head *t,
struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
+static inline int migrate_pages_to(struct list_head *pagelist,
+ struct vm_area_struct *vma, int dest) { return 0; }
+
static inline int migrate_prep(void) { return -ENOSYS; }
/* Possible settings for the migrate_page() method in address_operations */
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index f46afec6fbf89..72fc68c5ee963 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -10,7 +10,7 @@
#ifndef __MTD_TRANS_H__
#define __MTD_TRANS_H__
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
struct hd_geometry;
struct mtd_info;
@@ -22,7 +22,7 @@ struct mtd_blktrans_dev {
struct mtd_blktrans_ops *tr;
struct list_head list;
struct mtd_info *mtd;
- struct semaphore sem;
+ struct mutex lock;
int devnum;
int blksize;
unsigned long size;
diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h
index 386a52cf8b1b4..9addd073bf159 100644
--- a/include/linux/mtd/doc2000.h
+++ b/include/linux/mtd/doc2000.h
@@ -15,7 +15,7 @@
#define __MTD_DOC2000_H__
#include <linux/mtd/mtd.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#define DoC_Sig1 0
#define DoC_Sig2 1
@@ -187,7 +187,7 @@ struct DiskOnChip {
int numchips;
struct Nand *chips;
struct mtd_info *nextdoc;
- struct semaphore lock;
+ struct mutex lock;
};
int doc_decode_ecc(unsigned char sector[512], unsigned char ecc1[6]);
diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h
index 0268125a6271d..d7eaa40e5ab0c 100644
--- a/include/linux/mtd/inftl.h
+++ b/include/linux/mtd/inftl.h
@@ -52,6 +52,11 @@ struct INFTLrecord {
int INFTL_mount(struct INFTLrecord *s);
int INFTL_formatblock(struct INFTLrecord *s, int block);
+extern char inftlmountrev[];
+
+void INFTL_dumptables(struct INFTLrecord *s);
+void INFTL_dumpVUchains(struct INFTLrecord *s);
+
#endif /* __KERNEL__ */
#endif /* __MTD_INFTL_H__ */
diff --git a/include/linux/namei.h b/include/linux/namei.h
index e6698013e4d07..58cb3d3d44b47 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -75,7 +75,6 @@ extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
extern void release_open_intent(struct nameidata *);
extern struct dentry * lookup_one_len(const char *, struct dentry *, int);
-extern __deprecated_for_modules struct dentry * lookup_hash(struct nameidata *);
extern int follow_down(struct vfsmount **, struct dentry **);
extern int follow_up(struct vfsmount **, struct dentry **);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 950dc55e51928..40ccf8cc42393 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -598,20 +598,7 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data);
#define HAVE_NETIF_QUEUE
-static inline void __netif_schedule(struct net_device *dev)
-{
- if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
- unsigned long flags;
- struct softnet_data *sd;
-
- local_irq_save(flags);
- sd = &__get_cpu_var(softnet_data);
- dev->next_sched = sd->output_queue;
- sd->output_queue = dev;
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_restore(flags);
- }
-}
+extern void __netif_schedule(struct net_device *dev);
static inline void netif_schedule(struct net_device *dev)
{
@@ -675,13 +662,7 @@ static inline void dev_kfree_skb_irq(struct sk_buff *skb)
/* Use this variant in places where it could be invoked
* either from interrupt or non-interrupt context.
*/
-static inline void dev_kfree_skb_any(struct sk_buff *skb)
-{
- if (in_irq() || irqs_disabled())
- dev_kfree_skb_irq(skb);
- else
- dev_kfree_skb(skb);
-}
+extern void dev_kfree_skb_any(struct sk_buff *skb);
#define HAVE_NETIF_RX 1
extern int netif_rx(struct sk_buff *skb);
@@ -768,22 +749,9 @@ static inline int netif_device_present(struct net_device *dev)
return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
-static inline void netif_device_detach(struct net_device *dev)
-{
- if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
- netif_running(dev)) {
- netif_stop_queue(dev);
- }
-}
+extern void netif_device_detach(struct net_device *dev);
-static inline void netif_device_attach(struct net_device *dev)
-{
- if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
- netif_running(dev)) {
- netif_wake_queue(dev);
- __netdev_watchdog_up(dev);
- }
-}
+extern void netif_device_attach(struct net_device *dev);
/*
* Network interface message level settings
@@ -851,20 +819,7 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
* already been called and returned 1.
*/
-static inline void __netif_rx_schedule(struct net_device *dev)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- dev_hold(dev);
- list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
- if (dev->quota < 0)
- dev->quota += dev->weight;
- else
- dev->quota = dev->weight;
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
- local_irq_restore(flags);
-}
+extern void __netif_rx_schedule(struct net_device *dev);
/* Try to reschedule poll. Called by irq handler. */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 1350e47b0234d..f6bdef82a322a 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -142,6 +142,12 @@ struct xt_counters_info
#define ASSERT_WRITE_LOCK(x)
#include <linux/netfilter_ipv4/listhelp.h>
+#ifdef CONFIG_COMPAT
+#define COMPAT_TO_USER 1
+#define COMPAT_FROM_USER -1
+#define COMPAT_CALC_SIZE 0
+#endif
+
struct xt_match
{
struct list_head list;
@@ -175,6 +181,9 @@ struct xt_match
void (*destroy)(const struct xt_match *match, void *matchinfo,
unsigned int matchinfosize);
+ /* Called when the userspace alignment differs from the kernel-space one */
+ int (*compat)(void *match, void **dstptr, int *size, int convert);
+
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
@@ -220,6 +229,9 @@ struct xt_target
void (*destroy)(const struct xt_target *target, void *targinfo,
unsigned int targinfosize);
+ /* Called when the userspace alignment differs from the kernel-space one */
+ int (*compat)(void *target, void **dstptr, int *size, int convert);
+
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
@@ -314,6 +326,61 @@ extern void xt_proto_fini(int af);
extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);
+#ifdef CONFIG_COMPAT
+#include <net/compat.h>
+
+struct compat_xt_entry_match
+{
+ union {
+ struct {
+ u_int16_t match_size;
+ char name[XT_FUNCTION_MAXNAMELEN - 1];
+ u_int8_t revision;
+ } user;
+ u_int16_t match_size;
+ } u;
+ unsigned char data[0];
+};
+
+struct compat_xt_entry_target
+{
+ union {
+ struct {
+ u_int16_t target_size;
+ char name[XT_FUNCTION_MAXNAMELEN - 1];
+ u_int8_t revision;
+ } user;
+ u_int16_t target_size;
+ } u;
+ unsigned char data[0];
+};
+
+/* FIXME: this works only on 32 bit tasks
+ * need to change whole approach in order to calculate align as function of
+ * current task alignment */
+
+struct compat_xt_counters
+{
+ u_int32_t cnt[4];
+};
+
+struct compat_xt_counters_info
+{
+ char name[XT_TABLE_MAXNAMELEN];
+ compat_uint_t num_counters;
+ struct compat_xt_counters counters[0];
+};
+
+#define COMPAT_XT_ALIGN(s) (((s) + (__alignof__(struct compat_xt_counters)-1)) \
+ & ~(__alignof__(struct compat_xt_counters)-1))
+
+extern void xt_compat_lock(int af);
+extern void xt_compat_unlock(int af);
+extern int xt_compat_match(void *match, void **dstptr, int *size, int convert);
+extern int xt_compat_target(void *target, void **dstptr, int *size,
+ int convert);
+
+#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */
#endif /* _X_TABLES_H */
diff --git a/include/linux/netfilter/xt_esp.h b/include/linux/netfilter/xt_esp.h
new file mode 100644
index 0000000000000..9380fb1c27da9
--- /dev/null
+++ b/include/linux/netfilter/xt_esp.h
@@ -0,0 +1,14 @@
+#ifndef _XT_ESP_H
+#define _XT_ESP_H
+
+struct xt_esp
+{
+ u_int32_t spis[2]; /* Security Parameter Index */
+ u_int8_t invflags; /* Inverse flags */
+};
+
+/* Values for "invflags" field in struct xt_esp. */
+#define XT_ESP_INV_SPI 0x01 /* Invert the sense of spi. */
+#define XT_ESP_INV_MASK 0x01 /* All possible flags. */
+
+#endif /*_XT_ESP_H*/
diff --git a/include/linux/netfilter/xt_multiport.h b/include/linux/netfilter/xt_multiport.h
new file mode 100644
index 0000000000000..d49ee41837101
--- /dev/null
+++ b/include/linux/netfilter/xt_multiport.h
@@ -0,0 +1,30 @@
+#ifndef _XT_MULTIPORT_H
+#define _XT_MULTIPORT_H
+
+enum xt_multiport_flags
+{
+ XT_MULTIPORT_SOURCE,
+ XT_MULTIPORT_DESTINATION,
+ XT_MULTIPORT_EITHER
+};
+
+#define XT_MULTI_PORTS 15
+
+/* Must fit inside union xt_matchinfo: 16 bytes */
+struct xt_multiport
+{
+ u_int8_t flags; /* Type of comparison */
+ u_int8_t count; /* Number of ports */
+ u_int16_t ports[XT_MULTI_PORTS]; /* Ports */
+};
+
+struct xt_multiport_v1
+{
+ u_int8_t flags; /* Type of comparison */
+ u_int8_t count; /* Number of ports */
+ u_int16_t ports[XT_MULTI_PORTS]; /* Ports */
+ u_int8_t pflags[XT_MULTI_PORTS]; /* Port flags */
+ u_int8_t invert; /* Invert flag */
+};
+
+#endif /*_XT_MULTIPORT_H*/
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index d5b8c0d6a12b3..c0dac16e1902d 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -316,5 +316,23 @@ extern unsigned int ipt_do_table(struct sk_buff **pskb,
void *userdata);
#define IPT_ALIGN(s) XT_ALIGN(s)
+
+#ifdef CONFIG_COMPAT
+#include <net/compat.h>
+
+struct compat_ipt_entry
+{
+ struct ipt_ip ip;
+ compat_uint_t nfcache;
+ u_int16_t target_offset;
+ u_int16_t next_offset;
+ compat_uint_t comefrom;
+ struct compat_xt_counters counters;
+ unsigned char elems[0];
+};
+
+#define COMPAT_IPT_ALIGN(s) COMPAT_XT_ALIGN(s)
+
+#endif /* CONFIG_COMPAT */
#endif /*__KERNEL__*/
#endif /* _IPTABLES_H */
diff --git a/include/linux/netfilter_ipv4/ipt_esp.h b/include/linux/netfilter_ipv4/ipt_esp.h
index c782a83e53e0d..78296e7eeff98 100644
--- a/include/linux/netfilter_ipv4/ipt_esp.h
+++ b/include/linux/netfilter_ipv4/ipt_esp.h
@@ -1,16 +1,10 @@
#ifndef _IPT_ESP_H
#define _IPT_ESP_H
-struct ipt_esp
-{
- u_int32_t spis[2]; /* Security Parameter Index */
- u_int8_t invflags; /* Inverse flags */
-};
+#include <linux/netfilter/xt_esp.h>
-
-
-/* Values for "invflags" field in struct ipt_esp. */
-#define IPT_ESP_INV_SPI 0x01 /* Invert the sense of spi. */
-#define IPT_ESP_INV_MASK 0x01 /* All possible flags. */
+#define ipt_esp xt_esp
+#define IPT_ESP_INV_SPI XT_ESP_INV_SPI
+#define IPT_ESP_INV_MASK XT_ESP_INV_MASK
#endif /*_IPT_ESP_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_multiport.h b/include/linux/netfilter_ipv4/ipt_multiport.h
index e6b6fff811dfb..55fe85eca88c1 100644
--- a/include/linux/netfilter_ipv4/ipt_multiport.h
+++ b/include/linux/netfilter_ipv4/ipt_multiport.h
@@ -1,30 +1,15 @@
#ifndef _IPT_MULTIPORT_H
#define _IPT_MULTIPORT_H
-#include <linux/netfilter_ipv4/ip_tables.h>
-enum ipt_multiport_flags
-{
- IPT_MULTIPORT_SOURCE,
- IPT_MULTIPORT_DESTINATION,
- IPT_MULTIPORT_EITHER
-};
+#include <linux/netfilter/xt_multiport.h>
-#define IPT_MULTI_PORTS 15
+#define IPT_MULTIPORT_SOURCE XT_MULTIPORT_SOURCE
+#define IPT_MULTIPORT_DESTINATION XT_MULTIPORT_DESTINATION
+#define IPT_MULTIPORT_EITHER XT_MULTIPORT_EITHER
-/* Must fit inside union ipt_matchinfo: 16 bytes */
-struct ipt_multiport
-{
- u_int8_t flags; /* Type of comparison */
- u_int8_t count; /* Number of ports */
- u_int16_t ports[IPT_MULTI_PORTS]; /* Ports */
-};
+#define IPT_MULTI_PORTS XT_MULTI_PORTS
+
+#define ipt_multiport xt_multiport
+#define ipt_multiport_v1 xt_multiport_v1
-struct ipt_multiport_v1
-{
- u_int8_t flags; /* Type of comparison */
- u_int8_t count; /* Number of ports */
- u_int16_t ports[IPT_MULTI_PORTS]; /* Ports */
- u_int8_t pflags[IPT_MULTI_PORTS]; /* Port flags */
- u_int8_t invert; /* Invert flag */
-};
#endif /*_IPT_MULTIPORT_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_esp.h b/include/linux/netfilter_ipv6/ip6t_esp.h
index a91b6abc8079e..f62eaf53c16cb 100644
--- a/include/linux/netfilter_ipv6/ip6t_esp.h
+++ b/include/linux/netfilter_ipv6/ip6t_esp.h
@@ -1,14 +1,10 @@
#ifndef _IP6T_ESP_H
#define _IP6T_ESP_H
-struct ip6t_esp
-{
- u_int32_t spis[2]; /* Security Parameter Index */
- u_int8_t invflags; /* Inverse flags */
-};
+#include <linux/netfilter/xt_esp.h>
-/* Values for "invflags" field in struct ip6t_esp. */
-#define IP6T_ESP_INV_SPI 0x01 /* Invert the sense of spi. */
-#define IP6T_ESP_INV_MASK 0x01 /* All possible flags. */
+#define ip6t_esp xt_esp
+#define IP6T_ESP_INV_SPI XT_ESP_INV_SPI
+#define IP6T_ESP_INV_MASK XT_ESP_INV_MASK
#endif /*_IP6T_ESP_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_multiport.h b/include/linux/netfilter_ipv6/ip6t_multiport.h
index efe4954a8681c..042c92661ceea 100644
--- a/include/linux/netfilter_ipv6/ip6t_multiport.h
+++ b/include/linux/netfilter_ipv6/ip6t_multiport.h
@@ -1,21 +1,14 @@
#ifndef _IP6T_MULTIPORT_H
#define _IP6T_MULTIPORT_H
-#include <linux/netfilter_ipv6/ip6_tables.h>
-enum ip6t_multiport_flags
-{
- IP6T_MULTIPORT_SOURCE,
- IP6T_MULTIPORT_DESTINATION,
- IP6T_MULTIPORT_EITHER
-};
+#include <linux/netfilter/xt_multiport.h>
-#define IP6T_MULTI_PORTS 15
+#define IP6T_MULTIPORT_SOURCE XT_MULTIPORT_SOURCE
+#define IP6T_MULTIPORT_DESTINATION XT_MULTIPORT_DESTINATION
+#define IP6T_MULTIPORT_EITHER XT_MULTIPORT_EITHER
-/* Must fit inside union ip6t_matchinfo: 16 bytes */
-struct ip6t_multiport
-{
- u_int8_t flags; /* Type of comparison */
- u_int8_t count; /* Number of ports */
- u_int16_t ports[IP6T_MULTI_PORTS]; /* Ports */
-};
-#endif /*_IPT_MULTIPORT_H*/
+#define IP6T_MULTI_PORTS XT_MULTI_PORTS
+
+#define ip6t_multiport xt_multiport
+
+#endif /*_IP6T_MULTIPORT_H*/
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 839f0b3c23aa8..9539efd4f7e67 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -72,8 +72,8 @@ extern struct page * find_get_page(struct address_space *mapping,
unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
unsigned long index);
-extern struct page * find_trylock_page(struct address_space *mapping,
- unsigned long index);
+extern __deprecated_for_modules struct page * find_trylock_page(
+ struct address_space *mapping, unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 5b9082cc600fd..29960b03bef75 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_PID_H
#define _LINUX_PID_H
+#include <linux/rcupdate.h>
+
enum pid_type
{
PIDTYPE_PID,
@@ -9,45 +11,109 @@ enum pid_type
PIDTYPE_MAX
};
+/*
+ * What is struct pid?
+ *
+ * A struct pid is the kernel's internal notion of a process identifier.
+ * It refers to individual tasks, process groups, and sessions. While
+ * there are processes attached to it the struct pid lives in a hash
+ * table, so it and then the processes that it refers to can be found
+ * quickly from the numeric pid value. The attached processes may be
+ * quickly accessed by following pointers from struct pid.
+ *
+ * Storing pid_t values in the kernel and referring to them later has a
+ * problem. The process originally with that pid may have exited and the
+ * pid allocator wrapped, and another process could have come along
+ * and been assigned that pid.
+ *
+ * Referring to user space processes by holding a reference to struct
+ * task_struct has a problem. When the user space process exits
+ * the now useless task_struct is still kept. A task_struct plus a
+ * stack consumes around 10K of low kernel memory. More precisely
+ * this is THREAD_SIZE + sizeof(struct task_struct). By comparison
+ * a struct pid is about 64 bytes.
+ *
+ * Holding a reference to struct pid solves both of these problems.
+ * It is small so holding a reference does not consume a lot of
+ * resources, and since a new struct pid is allocated when the numeric
+ * pid value is reused we don't mistakenly refer to new processes.
+ */
+
struct pid
{
+ atomic_t count;
/* Try to keep pid_chain in the same cacheline as nr for find_pid */
int nr;
struct hlist_node pid_chain;
- /* list of pids with the same nr, only one of them is in the hash */
- struct list_head pid_list;
+ /* lists of tasks that use this pid */
+ struct hlist_head tasks[PIDTYPE_MAX];
+ struct rcu_head rcu;
};
-#define pid_task(elem, type) \
- list_entry(elem, struct task_struct, pids[type].pid_list)
+struct pid_link
+{
+ struct hlist_node node;
+ struct pid *pid;
+};
+
+static inline struct pid *get_pid(struct pid *pid)
+{
+ if (pid)
+ atomic_inc(&pid->count);
+ return pid;
+}
+
+extern void FASTCALL(put_pid(struct pid *pid));
+extern struct task_struct *FASTCALL(pid_task(struct pid *pid, enum pid_type));
+extern struct task_struct *FASTCALL(get_pid_task(struct pid *pid,
+ enum pid_type));
/*
* attach_pid() and detach_pid() must be called with the tasklist_lock
* write-held.
*/
-extern int FASTCALL(attach_pid(struct task_struct *task, enum pid_type type, int nr));
+extern int FASTCALL(attach_pid(struct task_struct *task,
+ enum pid_type type, int nr));
extern void FASTCALL(detach_pid(struct task_struct *task, enum pid_type));
/*
* look up a PID in the hash table. Must be called with the tasklist_lock
- * held.
+ * or rcu_read_lock() held.
+ */
+extern struct pid *FASTCALL(find_pid(int nr));
+
+/*
+ * Look up a PID in the hash table, and return with its count elevated.
*/
-extern struct pid *FASTCALL(find_pid(enum pid_type, int));
+extern struct pid *find_get_pid(int nr);
-extern int alloc_pidmap(void);
-extern void FASTCALL(free_pidmap(int));
+extern struct pid *alloc_pid(void);
+extern void FASTCALL(free_pid(struct pid *pid));
+#define pid_next(task, type) \
+ ((task)->pids[(type)].node.next)
+
+#define pid_next_task(task, type) \
+ hlist_entry(pid_next(task, type), struct task_struct, \
+ pids[(type)].node)
+
+
+/* We could use hlist_for_each_entry_rcu here but it takes more arguments
+ * than the do_each_task_pid/while_each_task_pid. So we roll our own
+ * to preserve the existing interface.
+ */
#define do_each_task_pid(who, type, task) \
if ((task = find_task_by_pid_type(type, who))) { \
- prefetch((task)->pids[type].pid_list.next); \
+ prefetch(pid_next(task, type)); \
do {
#define while_each_task_pid(who, type, task) \
- } while (task = pid_task((task)->pids[type].pid_list.next,\
- type), \
- prefetch((task)->pids[type].pid_list.next), \
- hlist_unhashed(&(task)->pids[type].pid_chain)); \
- } \
+ } while (pid_next(task, type) && ({ \
+ task = pid_next_task(task, type); \
+ rcu_dereference(task); \
+ prefetch(pid_next(task, type)); \
+ 1; }) ); \
+ }
#endif /* _LINUX_PID_H */
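
The comment block above argues for holding a counted struct pid rather than a pid_t or a task_struct pointer. A hedged sketch of that pattern using only the helpers declared here; struct my_watcher and its functions are illustrative:

struct my_watcher {
	struct pid *watched;	/* counted reference, never goes stale */
};

static void my_watch(struct my_watcher *w, struct task_struct *task)
{
	w->watched = get_pid(task->pids[PIDTYPE_PID].pid);
}

static void my_poke(struct my_watcher *w)
{
	struct task_struct *task = get_pid_task(w->watched, PIDTYPE_PID);

	if (task) {
		/* task is pinned here even if it exits concurrently */
		put_task_struct(task);
	}
}

static void my_unwatch(struct my_watcher *w)
{
	put_pid(w->watched);
	w->watched = NULL;
}
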
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 75c7f55023ab6..d218fc7293199 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -60,5 +60,8 @@ void free_pipe_info(struct inode* inode);
* add the splice flags here.
*/
#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */
+#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
+ /* we may still block on the fd we splice */
+ /* from/to, of course) */
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d04186d8cc685..541f4828f5e76 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -100,6 +100,7 @@ DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
+extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
#include <linux/time.h>
@@ -483,6 +484,7 @@ struct signal_struct {
#define MAX_PRIO (MAX_RT_PRIO + 40)
#define rt_task(p) (unlikely((p)->prio < MAX_RT_PRIO))
+#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
/*
* Some day this will be a full-fledged user tracking system..
@@ -683,6 +685,13 @@ static inline void prefetch_stack(struct task_struct *t) { }
struct audit_context; /* See audit.c */
struct mempolicy;
+enum sleep_type {
+ SLEEP_NORMAL,
+ SLEEP_NONINTERACTIVE,
+ SLEEP_INTERACTIVE,
+ SLEEP_INTERRUPTED,
+};
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
struct thread_info *thread_info;
@@ -705,7 +714,7 @@ struct task_struct {
unsigned long sleep_avg;
unsigned long long timestamp, last_ran;
unsigned long long sched_time; /* sched_clock time spent running */
- int activated;
+ enum sleep_type sleep_type;
unsigned long policy;
cpumask_t cpus_allowed;
@@ -751,7 +760,7 @@ struct task_struct {
struct task_struct *group_leader; /* threadgroup leader */
/* PID/PID hash table linkage. */
- struct pid pids[PIDTYPE_MAX];
+ struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
@@ -890,18 +899,19 @@ static inline pid_t process_group(struct task_struct *tsk)
*/
static inline int pid_alive(struct task_struct *p)
{
- return p->pids[PIDTYPE_PID].nr != 0;
+ return p->pids[PIDTYPE_PID].pid != NULL;
}
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
extern void __put_task_struct_cb(struct rcu_head *rhp);
+extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
{
if (atomic_dec_and_test(&t->usage))
- call_rcu(&t->rcu, __put_task_struct_cb);
+ __put_task_struct(t);
}
/*
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 690aabca8ed07..6348e8330897c 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
@@ -42,7 +43,7 @@ struct serio {
struct serio *parent, *child;
struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */
- struct semaphore drv_sem; /* protects serio->drv so attributes can pin driver */
+ struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */
struct device dev;
unsigned int registered; /* port has been fully registered with driver core */
@@ -151,17 +152,17 @@ static inline void serio_continue_rx(struct serio *serio)
*/
static inline int serio_pin_driver(struct serio *serio)
{
- return down_interruptible(&serio->drv_sem);
+ return mutex_lock_interruptible(&serio->drv_mutex);
}
static inline void serio_pin_driver_uninterruptible(struct serio *serio)
{
- down(&serio->drv_sem);
+ mutex_lock(&serio->drv_mutex);
}
static inline void serio_unpin_driver(struct serio *serio)
{
- up(&serio->drv_sem);
+ mutex_unlock(&serio->drv_mutex);
}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 613b9513f8b91..c4619a428d9b1 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -941,6 +941,25 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
#define NET_IP_ALIGN 2
#endif
+/*
+ * The networking layer reserves some headroom in skb data (via
+ * dev_alloc_skb). This is used to avoid having to reallocate skb data when
+ * the header has to grow. In the default case, if the header has to grow
+ * 16 bytes or less we avoid the reallocation.
+ *
+ * Unfortunately this headroom changes the DMA alignment of the resulting
+ * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
+ * on some architectures. An architecture can override this value,
+ * perhaps setting it to a cacheline in size (since that will maintain
+ * cacheline alignment of the DMA). It must be a power of 2.
+ *
+ * Various parts of the networking layer expect at least 16 bytes of
+ * headroom; you should not reduce this.
+ */
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD 16
+#endif
+
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
@@ -1030,9 +1049,9 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
gfp_t gfp_mask)
{
- struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
+ struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
if (likely(skb))
- skb_reserve(skb, 16);
+ skb_reserve(skb, NET_SKB_PAD);
return skb;
}
#else
@@ -1070,13 +1089,15 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length)
*/
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
- int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
+ int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
+ skb_headroom(skb);
if (delta < 0)
delta = 0;
if (delta || skb_cloned(skb))
- return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
+ return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
+ ~(NET_SKB_PAD-1), 0, GFP_ATOMIC);
return 0;
}
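
The comment above says an architecture may raise NET_SKB_PAD, typically to a cache line, before this header is seen. A hedged illustration of such an override; the 32-byte figure is only an example, and the constraints (power of two, at least 16 bytes) come from the comment:

/* in an architecture header included ahead of <linux/skbuff.h> */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	32	/* e.g. one cacheline on this hypothetical arch */
#endif
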
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index e78ffc7d5b561..5717147596b6c 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -571,5 +571,7 @@ asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
asmlinkage long sys_unshare(unsigned long unshare_flags);
asmlinkage long sys_splice(int fdin, int fdout, size_t len,
unsigned int flags);
+asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
+ int flags);
#endif
diff --git a/include/linux/timer.h b/include/linux/timer.h
index b5caabca553ce..0a485beba9f51 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -6,7 +6,7 @@
#include <linux/spinlock.h>
#include <linux/stddef.h>
-struct timer_base_s;
+struct tvec_t_base_s;
struct timer_list {
struct list_head entry;
@@ -15,16 +15,16 @@ struct timer_list {
void (*function)(unsigned long);
unsigned long data;
- struct timer_base_s *base;
+ struct tvec_t_base_s *base;
};
-extern struct timer_base_s __init_timer_base;
+extern struct tvec_t_base_s boot_tvec_bases;
#define TIMER_INITIALIZER(_function, _expires, _data) { \
.function = (_function), \
.expires = (_expires), \
.data = (_data), \
- .base = &__init_timer_base, \
+ .base = &boot_tvec_bases, \
}
#define DEFINE_TIMER(_name, _function, _expires, _data) \
diff --git a/include/linux/tiocl.h b/include/linux/tiocl.h
index 2c9e847f6ed1d..4756862c4ed4c 100644
--- a/include/linux/tiocl.h
+++ b/include/linux/tiocl.h
@@ -34,5 +34,6 @@ struct tiocl_selection {
#define TIOCL_SCROLLCONSOLE 13 /* scroll console */
#define TIOCL_BLANKSCREEN 14 /* keep screen blank even if a key is pressed */
#define TIOCL_BLANKEDSCREEN 15 /* return which vt was blanked */
+#define TIOCL_GETKMSGREDIRECT 17 /* get the vt the kernel messages are restricted to */
#endif /* _LINUX_TIOCL_H */
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index 0ff7ca68e5c53..7168302f98441 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -20,7 +20,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org>
- *
+ *
* Changes/Revisions:
* 0.2 16/10/2004 (Micah Dowty <micah@navi.cx>)
* - added force feedback support
@@ -51,7 +51,7 @@ struct uinput_request {
struct uinput_device {
struct input_dev *dev;
- struct semaphore sem;
+ struct mutex mutex;
enum uinput_state state;
wait_queue_head_t waitq;
unsigned char ready;
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index af2d6155d3fe2..d7670ec1ec1e1 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -966,66 +966,17 @@ struct v4l2_sliced_vbi_format
/* Teletext World System Teletext
(WST), defined on ITU-R BT.653-2 */
-#define V4L2_SLICED_TELETEXT_PAL_B (0x000001)
-#define V4L2_SLICED_TELETEXT_PAL_C (0x000002)
-#define V4L2_SLICED_TELETEXT_NTSC_B (0x000010)
-#define V4L2_SLICED_TELETEXT_SECAM (0x000020)
-
-/* Teletext North American Broadcast Teletext Specification
- (NABTS), defined on ITU-R BT.653-2 */
-#define V4L2_SLICED_TELETEXT_NTSC_C (0x000040)
-#define V4L2_SLICED_TELETEXT_NTSC_D (0x000080)
-
+#define V4L2_SLICED_TELETEXT_B (0x0001)
/* Video Program System, defined on ETS 300 231*/
-#define V4L2_SLICED_VPS (0x000400)
-
+#define V4L2_SLICED_VPS (0x0400)
/* Closed Caption, defined on EIA-608 */
-#define V4L2_SLICED_CAPTION_525 (0x001000)
-#define V4L2_SLICED_CAPTION_625 (0x002000)
-
+#define V4L2_SLICED_CAPTION_525 (0x1000)
/* Wide Screen System, defined on ITU-R BT1119.1 */
-#define V4L2_SLICED_WSS_625 (0x004000)
-
-/* Wide Screen System, defined on IEC 61880 */
-#define V4L2_SLICED_WSS_525 (0x008000)
-
-/* Vertical Interval Timecode (VITC), defined on SMPTE 12M */
-#define V4l2_SLICED_VITC_625 (0x010000)
-#define V4l2_SLICED_VITC_525 (0x020000)
-
-#define V4L2_SLICED_TELETEXT_B (V4L2_SLICED_TELETEXT_PAL_B |\
- V4L2_SLICED_TELETEXT_NTSC_B)
-
-#define V4L2_SLICED_TELETEXT (V4L2_SLICED_TELETEXT_PAL_B |\
- V4L2_SLICED_TELETEXT_PAL_C |\
- V4L2_SLICED_TELETEXT_SECAM |\
- V4L2_SLICED_TELETEXT_NTSC_B |\
- V4L2_SLICED_TELETEXT_NTSC_C |\
- V4L2_SLICED_TELETEXT_NTSC_D)
-
-#define V4L2_SLICED_CAPTION (V4L2_SLICED_CAPTION_525 |\
- V4L2_SLICED_CAPTION_625)
-
-#define V4L2_SLICED_WSS (V4L2_SLICED_WSS_525 |\
- V4L2_SLICED_WSS_625)
-
-#define V4L2_SLICED_VITC (V4L2_SLICED_VITC_525 |\
- V4L2_SLICED_VITC_625)
-
-#define V4L2_SLICED_VBI_525 (V4L2_SLICED_TELETEXT_NTSC_B |\
- V4L2_SLICED_TELETEXT_NTSC_C |\
- V4L2_SLICED_TELETEXT_NTSC_D |\
- V4L2_SLICED_CAPTION_525 |\
- V4L2_SLICED_WSS_525 |\
- V4l2_SLICED_VITC_525)
-
-#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_PAL_B |\
- V4L2_SLICED_TELETEXT_PAL_C |\
- V4L2_SLICED_TELETEXT_SECAM |\
- V4L2_SLICED_VPS |\
- V4L2_SLICED_CAPTION_625 |\
- V4L2_SLICED_WSS_625 |\
- V4l2_SLICED_VITC_625)
+#define V4L2_SLICED_WSS_625 (0x4000)
+
+#define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525)
+#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625)
+
struct v4l2_sliced_vbi_cap
{
diff --git a/include/media/cx25840.h b/include/media/cx25840.h
new file mode 100644
index 0000000000000..8e7e52d659a0d
--- /dev/null
+++ b/include/media/cx25840.h
@@ -0,0 +1,64 @@
+/*
+ cx25840.h - definition for cx25840/1/2/3 inputs
+
+ Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _CX25840_H_
+#define _CX25840_H_
+
+enum cx25840_video_input {
+ /* Composite video inputs In1-In8 */
+ CX25840_COMPOSITE1 = 1,
+ CX25840_COMPOSITE2,
+ CX25840_COMPOSITE3,
+ CX25840_COMPOSITE4,
+ CX25840_COMPOSITE5,
+ CX25840_COMPOSITE6,
+ CX25840_COMPOSITE7,
+ CX25840_COMPOSITE8,
+
+ /* S-Video inputs consist of one luma input (In1-In4) ORed with one
+ chroma input (In5-In8) */
+ CX25840_SVIDEO_LUMA1 = 0x10,
+ CX25840_SVIDEO_LUMA2 = 0x20,
+ CX25840_SVIDEO_LUMA3 = 0x30,
+ CX25840_SVIDEO_LUMA4 = 0x40,
+ CX25840_SVIDEO_CHROMA4 = 0x400,
+ CX25840_SVIDEO_CHROMA5 = 0x500,
+ CX25840_SVIDEO_CHROMA6 = 0x600,
+ CX25840_SVIDEO_CHROMA7 = 0x700,
+ CX25840_SVIDEO_CHROMA8 = 0x800,
+
+ /* S-Video aliases for common luma/chroma combinations */
+ CX25840_SVIDEO1 = 0x510,
+ CX25840_SVIDEO2 = 0x620,
+ CX25840_SVIDEO3 = 0x730,
+ CX25840_SVIDEO4 = 0x840,
+};
+
+enum cx25840_audio_input {
+ /* Audio inputs: serial or In4-In8 */
+ CX25840_AUDIO_SERIAL,
+ CX25840_AUDIO4 = 4,
+ CX25840_AUDIO5,
+ CX25840_AUDIO6,
+ CX25840_AUDIO7,
+ CX25840_AUDIO8,
+};
+
+#endif
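
As the comment notes, an S-Video input is one luma selector ORed with one chroma selector; the predefined aliases follow that rule (CX25840_SVIDEO1 == CX25840_SVIDEO_LUMA1 | CX25840_SVIDEO_CHROMA5 == 0x510). A board needing a non-standard pairing could, purely for illustration, define its own:

/* hypothetical board wiring: luma on In2, chroma on In7 */
#define MYBOARD_SVIDEO_IN	(CX25840_SVIDEO_LUMA2 | CX25840_SVIDEO_CHROMA7)
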
diff --git a/include/media/msp3400.h b/include/media/msp3400.h
index 0be61a021d45c..6ab854931c059 100644
--- a/include/media/msp3400.h
+++ b/include/media/msp3400.h
@@ -80,16 +80,16 @@
*/
/* SCART input to DSP selection */
-#define MSP_IN_SCART_1 0 /* Pin SC1_IN */
-#define MSP_IN_SCART_2 1 /* Pin SC2_IN */
-#define MSP_IN_SCART_3 2 /* Pin SC3_IN */
-#define MSP_IN_SCART_4 3 /* Pin SC4_IN */
+#define MSP_IN_SCART1 0 /* Pin SC1_IN */
+#define MSP_IN_SCART2 1 /* Pin SC2_IN */
+#define MSP_IN_SCART3 2 /* Pin SC3_IN */
+#define MSP_IN_SCART4 3 /* Pin SC4_IN */
#define MSP_IN_MONO 6 /* Pin MONO_IN */
#define MSP_IN_MUTE 7 /* Mute DSP input */
#define MSP_SCART_TO_DSP(in) (in)
/* Tuner input to demodulator and DSP selection */
-#define MSP_IN_TUNER_1 0 /* Analog Sound IF input pin ANA_IN1 */
-#define MSP_IN_TUNER_2 1 /* Analog Sound IF input pin ANA_IN2 */
+#define MSP_IN_TUNER1 0 /* Analog Sound IF input pin ANA_IN1 */
+#define MSP_IN_TUNER2 1 /* Analog Sound IF input pin ANA_IN2 */
#define MSP_TUNER_TO_DSP(in) ((in) << 3)
/* The msp has up to 5 DSP outputs, each output can independently select
@@ -109,14 +109,14 @@
DSP. This is currently not implemented. Also not implemented is the
multi-channel capable I2S3 input of the 44x0G. If someone can demonstrate
a need for one of those features then additional support can be added. */
-#define MSP_DSP_OUT_TUNER 0 /* Tuner output */
-#define MSP_DSP_OUT_SCART 2 /* SCART output */
-#define MSP_DSP_OUT_I2S1 5 /* I2S1 output */
-#define MSP_DSP_OUT_I2S2 6 /* I2S2 output */
-#define MSP_DSP_OUT_I2S3 7 /* I2S3 output */
-#define MSP_DSP_OUT_MAIN_AVC 11 /* MAIN AVC processed output */
-#define MSP_DSP_OUT_MAIN 12 /* MAIN output */
-#define MSP_DSP_OUT_AUX 13 /* AUX output */
+#define MSP_DSP_IN_TUNER 0 /* Tuner DSP input */
+#define MSP_DSP_IN_SCART 2 /* SCART DSP input */
+#define MSP_DSP_IN_I2S1 5 /* I2S1 DSP input */
+#define MSP_DSP_IN_I2S2 6 /* I2S2 DSP input */
+#define MSP_DSP_IN_I2S3 7 /* I2S3 DSP input */
+#define MSP_DSP_IN_MAIN_AVC 11 /* MAIN AVC processed DSP input */
+#define MSP_DSP_IN_MAIN 12 /* MAIN DSP input */
+#define MSP_DSP_IN_AUX 13 /* AUX DSP input */
#define MSP_DSP_TO_MAIN(in) ((in) << 4)
#define MSP_DSP_TO_AUX(in) ((in) << 8)
#define MSP_DSP_TO_SCART1(in) ((in) << 12)
@@ -125,16 +125,16 @@
/* Output SCART select: the SCART outputs can select which input
to use. */
-#define MSP_OUT_SCART1 0 /* SCART1 input, bypassing the DSP */
-#define MSP_OUT_SCART2 1 /* SCART2 input, bypassing the DSP */
-#define MSP_OUT_SCART3 2 /* SCART3 input, bypassing the DSP */
-#define MSP_OUT_SCART4 3 /* SCART4 input, bypassing the DSP */
-#define MSP_OUT_SCART1_DA 4 /* DSP SCART1 output */
-#define MSP_OUT_SCART2_DA 5 /* DSP SCART2 output */
-#define MSP_OUT_MONO 6 /* MONO input, bypassing the DSP */
-#define MSP_OUT_MUTE 7 /* MUTE output */
-#define MSP_OUT_TO_SCART1(in) (in)
-#define MSP_OUT_TO_SCART2(in) ((in) << 4)
+#define MSP_SC_IN_SCART1 0 /* SCART1 input, bypassing the DSP */
+#define MSP_SC_IN_SCART2 1 /* SCART2 input, bypassing the DSP */
+#define MSP_SC_IN_SCART3 2 /* SCART3 input, bypassing the DSP */
+#define MSP_SC_IN_SCART4 3 /* SCART4 input, bypassing the DSP */
+#define MSP_SC_IN_DSP_SCART1 4 /* DSP SCART1 input */
+#define MSP_SC_IN_DSP_SCART2 5 /* DSP SCART2 input */
+#define MSP_SC_IN_MONO 6 /* MONO input, bypassing the DSP */
+#define MSP_SC_IN_MUTE 7 /* MUTE output */
+#define MSP_SC_TO_SCART1(in) (in)
+#define MSP_SC_TO_SCART2(in) ((in) << 4)
/* Shortcut macros */
#define MSP_INPUT(sc, t, main_aux_src, sc_i2s_src) \
@@ -145,14 +145,14 @@
MSP_DSP_TO_SCART1(sc_i2s_src) | \
MSP_DSP_TO_SCART2(sc_i2s_src) | \
MSP_DSP_TO_I2S(sc_i2s_src))
-#define MSP_INPUT_DEFAULT MSP_INPUT(MSP_IN_SCART_1, MSP_IN_TUNER_1, \
- MSP_DSP_OUT_TUNER, MSP_DSP_OUT_TUNER)
+#define MSP_INPUT_DEFAULT MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, \
+ MSP_DSP_IN_TUNER, MSP_DSP_IN_TUNER)
#define MSP_OUTPUT(sc) \
- (MSP_OUT_TO_SCART1(sc) | \
- MSP_OUT_TO_SCART2(sc))
+ (MSP_SC_TO_SCART1(sc) | \
+ MSP_SC_TO_SCART2(sc))
/* This equals the RESET position of the msp3400 ACB register */
-#define MSP_OUTPUT_DEFAULT (MSP_OUT_TO_SCART1(MSP_OUT_SCART3) | \
- MSP_OUT_TO_SCART2(MSP_OUT_SCART1_DA))
+#define MSP_OUTPUT_DEFAULT (MSP_SC_TO_SCART1(MSP_SC_IN_SCART3) | \
+ MSP_SC_TO_SCART2(MSP_SC_IN_DSP_SCART1))
/* Tuner inputs vs. msp version */
/* Chip TUNER_1 TUNER_2
diff --git a/include/media/saa7115.h b/include/media/saa7115.h
new file mode 100644
index 0000000000000..6b4836f3f0572
--- /dev/null
+++ b/include/media/saa7115.h
@@ -0,0 +1,37 @@
+/*
+ saa7115.h - definition for saa7113/4/5 inputs
+
+ Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _SAA7115_H_
+#define _SAA7115_H_
+
+/* SAA7113/4/5 HW inputs */
+#define SAA7115_COMPOSITE0 0
+#define SAA7115_COMPOSITE1 1
+#define SAA7115_COMPOSITE2 2
+#define SAA7115_COMPOSITE3 3
+#define SAA7115_COMPOSITE4 4 /* not available for the saa7113 */
+#define SAA7115_COMPOSITE5 5 /* not available for the saa7113 */
+#define SAA7115_SVIDEO0 6
+#define SAA7115_SVIDEO1 7
+#define SAA7115_SVIDEO2 8
+#define SAA7115_SVIDEO3 9
+
+#endif
+
diff --git a/include/media/saa7127.h b/include/media/saa7127.h
new file mode 100644
index 0000000000000..bbcf862141af1
--- /dev/null
+++ b/include/media/saa7127.h
@@ -0,0 +1,41 @@
+/*
+ saa7127.h - definition for saa7126/7/8/9 inputs/outputs
+
+ Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _SAA7127_H_
+#define _SAA7127_H_
+
+/* Enumeration for the supported input types */
+enum saa7127_input_type {
+ SAA7127_INPUT_TYPE_NORMAL,
+ SAA7127_INPUT_TYPE_TEST_IMAGE
+};
+
+/* Enumeration for the supported output signal types */
+enum saa7127_output_type {
+ SAA7127_OUTPUT_TYPE_BOTH,
+ SAA7127_OUTPUT_TYPE_COMPOSITE,
+ SAA7127_OUTPUT_TYPE_SVIDEO,
+ SAA7127_OUTPUT_TYPE_RGB,
+ SAA7127_OUTPUT_TYPE_YUV_C,
+ SAA7127_OUTPUT_TYPE_YUV_V
+};
+
+#endif
+
diff --git a/include/media/upd64031a.h b/include/media/upd64031a.h
new file mode 100644
index 0000000000000..3ad6a32e1bce2
--- /dev/null
+++ b/include/media/upd64031a.h
@@ -0,0 +1,40 @@
+/*
+ * upd64031a - NEC Electronics Ghost Reduction input defines
+ *
+ * 2006 by Hans Verkuil (hverkuil@xs4all.nl)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _UPD64031A_H_
+#define _UPD64031A_H_
+
+/* Ghost reduction modes */
+#define UPD64031A_GR_ON 0
+#define UPD64031A_GR_OFF 1
+#define UPD64031A_GR_THROUGH 3
+
+/* Direct 3D/YCS Connection */
+#define UPD64031A_3DYCS_DISABLE (0 << 2)
+#define UPD64031A_3DYCS_COMPOSITE (2 << 2)
+#define UPD64031A_3DYCS_SVIDEO (3 << 2)
+
+/* Composite sync digital separation circuit */
+#define UPD64031A_COMPOSITE_EXTERNAL (1 << 4)
+
+/* Vertical sync digital separation circuit */
+#define UPD64031A_VERTICAL_EXTERNAL (1 << 5)
+
+#endif
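The values above occupy separate bit fields: the ghost reduction mode in bits 0-1, the 3D/YCS connection in bits 2-3, and the sync separation selects in bits 4-5. A routing word is therefore built by OR-ing one choice from each group; a minimal sketch (the helper name is illustrative only):

#include <media/upd64031a.h>

static int upd64031a_build_route(int svideo_in)
{
        /* Ghost reduction on; the input is either taken directly as
         * S-Video or run through the 3D Y/C separation path as
         * composite. */
        int route = UPD64031A_GR_ON;

        route |= svideo_in ? UPD64031A_3DYCS_SVIDEO
                           : UPD64031A_3DYCS_COMPOSITE;
        return route;
}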
diff --git a/include/media/upd64083.h b/include/media/upd64083.h
new file mode 100644
index 0000000000000..59b6f32ba300e
--- /dev/null
+++ b/include/media/upd64083.h
@@ -0,0 +1,58 @@
+/*
+ * upd6408x - NEC Electronics 3-Dimensional Y/C separation input defines
+ *
+ * 2006 by Hans Verkuil (hverkuil@xs4all.nl)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _UPD64083_H_
+#define _UPD64083_H_
+
+/* There are two bits of information that the driver needs in order
+ to select the correct routing: the operating mode and the selection
+ of the Y input (external or internal).
+
+ The first two operating modes expect a composite signal on the Y input,
+ the second two operating modes use both the Y and C inputs.
+
+ Normally YCS_MODE is used for tuner and composite inputs, and the
+ YCNR mode is used for S-Video inputs.
+
+ The external Y-ADC is selected when the composite input comes from an
+ upd64031a ghost reduction device. If this device is not present, or
+ the input is an S-Video signal, then the internal Y-ADC input should
+ be used. */

+
+/* Operating modes: */
+
+/* YCS mode: Y/C separation (burst locked clocking) */
+#define UPD64083_YCS_MODE 0
+/* YCS+ mode: 2D Y/C separation and YCNR (burst locked clocking) */
+#define UPD64083_YCS_PLUS_MODE 1
+
+/* Note: the following two modes cannot be used in combination with the
+ external Y-ADC. */
+/* MNNR mode: frame comb type YNR+C delay (line locked clocking) */
+#define UPD64083_MNNR_MODE 2
+/* YCNR mode: frame recursive YCNR (burst locked clocking) */
+#define UPD64083_YCNR_MODE 3
+
+/* Select external Y-ADC: this should be set if this device is used in
+ combination with the upd64031a ghost reduction device.
+ Otherwise leave at 0 (use internal Y-ADC). */
+#define UPD64083_EXT_Y_ADC (1 << 2)
+
+#endif
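Putting the comment block above into code: the routing value is one of the four operating modes, optionally OR-ed with UPD64083_EXT_Y_ADC when the composite signal arrives from an upd64031a. A minimal sketch under exactly those assumptions (the helper name is not part of the header):

#include <media/upd64083.h>

static int upd64083_build_route(int is_svideo, int have_upd64031a)
{
        if (is_svideo)
                return UPD64083_YCNR_MODE;      /* internal Y-ADC */

        /* Tuner/composite input: Y/C separation; select the external
         * Y-ADC only when an upd64031a ghost reducer feeds this chip. */
        return UPD64083_YCS_MODE |
               (have_upd64031a ? UPD64083_EXT_Y_ADC : 0);
}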
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 9418f4d1afbb9..3c989db8a7aa5 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -405,9 +405,6 @@ extern int tcp_disconnect(struct sock *sk, int flags);
extern void tcp_unhash(struct sock *sk);
-extern int tcp_v4_hash_connecting(struct sock *sk);
-
-
/* From syncookies.c */
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index e100291e43f45..0d5529c382e8c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -242,7 +242,6 @@ extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
-struct xfrm_decap_state;
struct xfrm_type
{
char *description;
@@ -251,7 +250,7 @@ struct xfrm_type
int (*init_state)(struct xfrm_state *x);
void (*destructor)(struct xfrm_state *);
- int (*input)(struct xfrm_state *, struct xfrm_decap_state *, struct sk_buff *skb);
+ int (*input)(struct xfrm_state *, struct sk_buff *skb);
int (*output)(struct xfrm_state *, struct sk_buff *pskb);
/* Estimate maximal size of result of transformation of a dgram */
u32 (*get_max_size)(struct xfrm_state *, int size);
@@ -606,25 +605,11 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
-/* Decapsulation state, used by the input to store data during
- * decapsulation procedure, to be used later (during the policy
- * check
- */
-struct xfrm_decap_state {
- char decap_data[20];
- __u16 decap_type;
-};
-
-struct sec_decap_state {
- struct xfrm_state *xvec;
- struct xfrm_decap_state decap;
-};
-
struct sec_path
{
atomic_t refcnt;
int len;
- struct sec_decap_state x[XFRM_MAX_DEPTH];
+ struct xfrm_state *xvec[XFRM_MAX_DEPTH];
};
static inline struct sec_path *
diff --git a/include/pcmcia/bulkmem.h b/include/pcmcia/bulkmem.h
index b53b78d497ba4..6bc7472293b25 100644
--- a/include/pcmcia/bulkmem.h
+++ b/include/pcmcia/bulkmem.h
@@ -35,7 +35,7 @@ typedef struct region_info_t {
#define REGION_BAR_MASK 0xe000
#define REGION_BAR_SHIFT 13
-int pcmcia_get_first_region(client_handle_t handle, region_info_t *rgn);
-int pcmcia_get_next_region(client_handle_t handle, region_info_t *rgn);
+int pcmcia_get_first_region(struct pcmcia_device *handle, region_info_t *rgn);
+int pcmcia_get_next_region(struct pcmcia_device *handle, region_info_t *rgn);
#endif /* _LINUX_BULKMEM_H */
diff --git a/include/pcmcia/ciscode.h b/include/pcmcia/ciscode.h
index da19c297dd657..c1da8558339a2 100644
--- a/include/pcmcia/ciscode.h
+++ b/include/pcmcia/ciscode.h
@@ -1,5 +1,5 @@
/*
- * ciscode.h -- Definitions for bulk memory services
+ * ciscode.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -122,4 +122,7 @@
#define MANFID_XIRCOM 0x0105
+#define MANFID_POSSIO 0x030c
+#define PRODID_POSSIO_GCC 0x0003
+
#endif /* _LINUX_CISCODE_H */
diff --git a/include/pcmcia/cistpl.h b/include/pcmcia/cistpl.h
index c6a069554fd7e..d3bbb19caf818 100644
--- a/include/pcmcia/cistpl.h
+++ b/include/pcmcia/cistpl.h
@@ -586,12 +586,7 @@ typedef struct cisdump_t {
cisdata_t Data[CISTPL_MAX_CIS_SIZE];
} cisdump_t;
-int pcmcia_get_first_tuple(client_handle_t handle, tuple_t *tuple);
-int pcmcia_get_next_tuple(client_handle_t handle, tuple_t *tuple);
-int pcmcia_get_tuple_data(client_handle_t handle, tuple_t *tuple);
-int pcmcia_parse_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse);
-int pcmcia_validate_cis(client_handle_t handle, cisinfo_t *info);
int pcmcia_replace_cis(struct pcmcia_socket *s, cisdump_t *cis);
/* don't use outside of PCMCIA core yet */
@@ -602,4 +597,20 @@ int pccard_parse_tuple(tuple_t *tuple, cisparse_t *parse);
int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, cisinfo_t *info);
+/* ... but use these wrappers instead */
+#define pcmcia_get_first_tuple(p_dev, tuple) \
+ pccard_get_first_tuple(p_dev->socket, p_dev->func, tuple)
+
+#define pcmcia_get_next_tuple(p_dev, tuple) \
+ pccard_get_next_tuple(p_dev->socket, p_dev->func, tuple)
+
+#define pcmcia_get_tuple_data(p_dev, tuple) \
+ pccard_get_tuple_data(p_dev->socket, tuple)
+
+#define pcmcia_parse_tuple(p_dev, tuple, parse) \
+ pccard_parse_tuple(tuple, parse)
+
+#define pcmcia_validate_cis(p_dev, info) \
+ pccard_validate_cis(p_dev->socket, p_dev->func, info)
+
#endif /* LINUX_CISTPL_H */
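The wrappers keep the familiar tuple-walking pattern but take a struct pcmcia_device instead of a client handle. A hedged sketch of that pattern as it would look with the new macros (error handling trimmed, and the choice of CISTPL_CFTABLE_ENTRY is only an example):

static void my_walk_cis(struct pcmcia_device *p_dev)
{
        tuple_t tuple;
        cisparse_t parse;
        u_char buf[64];

        tuple.Attributes = 0;
        tuple.TupleData = buf;
        tuple.TupleDataMax = sizeof(buf);
        tuple.TupleOffset = 0;
        tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;

        if (pcmcia_get_first_tuple(p_dev, &tuple) != CS_SUCCESS)
                return;
        do {
                if (pcmcia_get_tuple_data(p_dev, &tuple) != CS_SUCCESS)
                        continue;
                if (pcmcia_parse_tuple(p_dev, &tuple, &parse) != CS_SUCCESS)
                        continue;
                /* ... inspect parse.cftable_entry here ... */
        } while (pcmcia_get_next_tuple(p_dev, &tuple) == CS_SUCCESS);
}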
diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h
index 52660f32663d1..d5838c30d20f7 100644
--- a/include/pcmcia/cs.h
+++ b/include/pcmcia/cs.h
@@ -109,17 +109,6 @@ typedef struct client_req_t {
#define CLIENT_THIS_SOCKET 0x01
-/* For RegisterClient */
-typedef struct client_reg_t {
- dev_info_t *dev_info;
- u_int Attributes; /* UNUSED */
- u_int EventMask;
- int (*event_handler)(event_t event, int priority,
- event_callback_args_t *);
- event_callback_args_t event_callback_args;
- u_int Version;
-} client_reg_t;
-
/* ModifyConfiguration */
typedef struct modconf_t {
u_int Attributes;
@@ -127,15 +116,16 @@ typedef struct modconf_t {
} modconf_t;
/* Attributes for ModifyConfiguration */
-#define CONF_IRQ_CHANGE_VALID 0x100
-#define CONF_VCC_CHANGE_VALID 0x200
-#define CONF_VPP1_CHANGE_VALID 0x400
-#define CONF_VPP2_CHANGE_VALID 0x800
+#define CONF_IRQ_CHANGE_VALID 0x0100
+#define CONF_VCC_CHANGE_VALID 0x0200
+#define CONF_VPP1_CHANGE_VALID 0x0400
+#define CONF_VPP2_CHANGE_VALID 0x0800
+#define CONF_IO_CHANGE_WIDTH 0x1000
/* For RequestConfiguration */
typedef struct config_req_t {
u_int Attributes;
- u_int Vcc, Vpp1, Vpp2;
+ u_int Vpp; /* both Vpp1 and Vpp2 */
u_int IntType;
u_int ConfigBase;
u_char Status, Pin, Copy, ExtStatus;
@@ -389,23 +379,27 @@ int pcmcia_get_status(struct pcmcia_device *p_dev, cs_status_t *status);
int pcmcia_get_mem_page(window_handle_t win, memreq_t *req);
int pcmcia_map_mem_page(window_handle_t win, memreq_t *req);
int pcmcia_modify_configuration(struct pcmcia_device *p_dev, modconf_t *mod);
-int pcmcia_release_configuration(struct pcmcia_device *p_dev);
-int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req);
-int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req);
int pcmcia_release_window(window_handle_t win);
int pcmcia_request_configuration(struct pcmcia_device *p_dev, config_req_t *req);
int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req);
int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req);
int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_handle_t *wh);
-int pcmcia_reset_card(struct pcmcia_device *p_dev, client_req_t *req);
int pcmcia_suspend_card(struct pcmcia_socket *skt);
int pcmcia_resume_card(struct pcmcia_socket *skt);
int pcmcia_eject_card(struct pcmcia_socket *skt);
int pcmcia_insert_card(struct pcmcia_socket *skt);
+int pccard_reset_card(struct pcmcia_socket *skt);
+
+struct pcmcia_device * pcmcia_dev_present(struct pcmcia_device *p_dev);
+void pcmcia_disable_device(struct pcmcia_device *p_dev);
struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt);
void pcmcia_put_socket(struct pcmcia_socket *skt);
+/* compatibility functions */
+#define pcmcia_reset_card(p_dev, req) \
+ pccard_reset_card(p_dev->socket)
+
#endif /* __KERNEL__ */
#endif /* _LINUX_CS_H */
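With the three pcmcia_release_* entry points removed and config_req_t carrying a single Vpp value instead of Vcc/Vpp1/Vpp2, teardown funnels through the new pcmcia_disable_device() call. A minimal sketch of what a driver's release path reduces to (the function name is illustrative):

static void my_release(struct pcmcia_device *p_dev)
{
        /* Drops the locked configuration and whatever io, irq and
         * window resources this device had requested. */
        pcmcia_disable_device(p_dev);
}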
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index 8e2a96396478a..8c339f5678cf9 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -39,7 +39,7 @@ typedef struct win_info_t {
typedef struct bind_info_t {
dev_info_t dev_info;
u_char function;
- struct dev_link_t *instance;
+ struct pcmcia_device *instance;
char name[DEV_NAME_LEN];
u_short major, minor;
void *next;
@@ -96,6 +96,7 @@ typedef union ds_ioctl_arg_t {
#ifdef __KERNEL__
#include <linux/device.h>
+#include <pcmcia/ss.h>
typedef struct dev_node_t {
char dev_name[DEV_NAME_LEN];
@@ -103,34 +104,9 @@ typedef struct dev_node_t {
struct dev_node_t *next;
} dev_node_t;
-typedef struct dev_link_t {
- dev_node_t *dev;
- u_int state, open;
- wait_queue_head_t pending;
- client_handle_t handle;
- io_req_t io;
- irq_req_t irq;
- config_req_t conf;
- window_handle_t win;
- void *priv;
- struct dev_link_t *next;
-} dev_link_t;
-
-/* Flags for device state */
-#define DEV_PRESENT 0x01
-#define DEV_CONFIG 0x02
-#define DEV_STALE_CONFIG 0x04 /* release on close */
-#define DEV_STALE_LINK 0x08 /* detach on release */
-#define DEV_CONFIG_PENDING 0x10
-#define DEV_RELEASE_PENDING 0x20
-#define DEV_SUSPEND 0x40
-#define DEV_BUSY 0x80
-
-#define DEV_OK(l) \
- ((l) && ((l->state & ~DEV_BUSY) == (DEV_CONFIG|DEV_PRESENT)))
-
struct pcmcia_socket;
+struct config_t;
struct pcmcia_driver {
int (*probe) (struct pcmcia_device *dev);
@@ -148,6 +124,7 @@ struct pcmcia_driver {
int pcmcia_register_driver(struct pcmcia_driver *driver);
void pcmcia_unregister_driver(struct pcmcia_driver *driver);
+
struct pcmcia_device {
/* the socket and the device_no [for multifunction devices]
uniquely define a pcmcia_device */
@@ -160,21 +137,40 @@ struct pcmcia_device {
/* the hardware "function" device; certain subdevices can
* share one hardware "function" device. */
u8 func;
+ struct config_t* function_config;
struct list_head socket_device_list;
- /* deprecated, a cleaned up version will be moved into this
- struct soon */
- dev_link_t *instance;
- u_int state;
+ /* deprecated, will be cleaned up soon */
+ dev_node_t *dev_node;
+ u_int open;
+ io_req_t io;
+ irq_req_t irq;
+ config_req_t conf;
+ window_handle_t win;
+
+ /* Is the device suspended, or in the process of
+ * being removed? */
+ u16 suspended:1;
+ u16 _removed:1;
+
+ /* Flags whether io, irq, win configurations were
+ * requested, and whether the configuration is "locked" */
+ u16 _irq:1;
+ u16 _io:1;
+ u16 _win:4;
+ u16 _locked:1;
+
+ /* Flag whether a "fuzzy" func_id based match is
+ * allowed. */
+ u16 allow_func_id_match:1;
/* information about this device */
- u8 has_manf_id:1;
- u8 has_card_id:1;
- u8 has_func_id:1;
+ u16 has_manf_id:1;
+ u16 has_card_id:1;
+ u16 has_func_id:1;
- u8 allow_func_id_match:1;
- u8 reserved:4;
+ u16 reserved:3;
u8 func_id;
u16 manf_id;
@@ -182,22 +178,24 @@ struct pcmcia_device {
char * prod_id[4];
+ struct device dev;
+
+#ifdef CONFIG_PCMCIA_IOCTL
/* device driver wanted by cardmgr */
struct pcmcia_driver * cardmgr;
+#endif
- struct device dev;
+ /* data private to drivers */
+ void *priv;
};
#define to_pcmcia_dev(n) container_of(n, struct pcmcia_device, dev)
#define to_pcmcia_drv(n) container_of(n, struct pcmcia_driver, drv)
-#define handle_to_pdev(handle) (handle)
#define handle_to_dev(handle) (handle->dev)
-#define dev_to_instance(dev) (dev->instance)
-
/* error reporting */
-void cs_error(client_handle_t handle, int func, int ret);
+void cs_error(struct pcmcia_device *handle, int func, int ret);
#endif /* __KERNEL__ */
#endif /* _LINUX_DS_H */
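Because dev_link_t is gone, the io/irq/conf/win request structures and a priv pointer for driver data now live directly in struct pcmcia_device. A sketch of how a probe routine might start out under that layout; my_dev_t and the configuration values shown are assumptions, not taken from this patch:

struct my_dev_t {
        /* driver-private state, contents irrelevant here */
        int open_count;
};

static int my_probe(struct pcmcia_device *p_dev)
{
        struct my_dev_t *local;

        local = kzalloc(sizeof(*local), GFP_KERNEL);
        if (!local)
                return -ENOMEM;
        p_dev->priv = local;

        /* Fill the request structures embedded in pcmcia_device
         * instead of a driver-private dev_link_t. */
        p_dev->conf.Attributes = CONF_ENABLE_IRQ;
        p_dev->conf.IntType = INT_MEMORY_AND_IO;
        p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;

        return 0;       /* actual configuration would continue here */
}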
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 2889a69a7a8f4..5e0a01ab22163 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -18,6 +18,7 @@
#include <linux/config.h>
#include <linux/device.h>
#include <linux/sched.h> /* task_struct, completion */
+#include <linux/mutex.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
@@ -146,14 +147,15 @@ extern struct pccard_resource_ops pccard_static_ops;
/* !SS_CAP_STATIC_MAP */
extern struct pccard_resource_ops pccard_nonstatic_ops;
+/* static mem, dynamic IO sockets */
+extern struct pccard_resource_ops pccard_iodyn_ops;
+
/*
* Calls to set up low-level "Socket Services" drivers
*/
struct pcmcia_socket;
typedef struct io_window_t {
- u_int Attributes;
- kio_addr_t BasePort, NumPorts;
kio_addr_t InUse, Config;
struct resource *res;
} io_window_t;
@@ -162,7 +164,7 @@ typedef struct io_window_t {
typedef struct window_t {
u_short magic;
u_short index;
- client_handle_t handle;
+ struct pcmcia_device *handle;
struct pcmcia_socket *sock;
pccard_mem_map ctl;
} window_t;
@@ -186,7 +188,6 @@ struct pcmcia_socket {
u_short lock_count;
pccard_mem_map cis_mem;
void __iomem *cis_virt;
- struct config_t *config;
struct {
u_int AssignedIRQ;
u_int Config;
@@ -241,7 +242,7 @@ struct pcmcia_socket {
#endif
/* state thread */
- struct semaphore skt_sem; /* protects socket h/w state */
+ struct mutex skt_mutex; /* protects socket h/w state */
struct task_struct *thread;
struct completion thread_done;
diff --git a/ipc/shm.c b/ipc/shm.c
index f806a2e314e0b..6b0c9af5bbf7c 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -91,8 +91,8 @@ static inline int shm_addid(struct shmid_kernel *shp)
static inline void shm_inc (int id) {
struct shmid_kernel *shp;
- if(!(shp = shm_lock(id)))
- BUG();
+ shp = shm_lock(id);
+ BUG_ON(!shp);
shp->shm_atim = get_seconds();
shp->shm_lprid = current->tgid;
shp->shm_nattch++;
@@ -142,8 +142,8 @@ static void shm_close (struct vm_area_struct *shmd)
mutex_lock(&shm_ids.mutex);
/* remove from the list of attaches of the shm segment */
- if(!(shp = shm_lock(id)))
- BUG();
+ shp = shm_lock(id);
+ BUG_ON(!shp);
shp->shm_lprid = current->tgid;
shp->shm_dtim = get_seconds();
shp->shm_nattch--;
@@ -283,8 +283,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
err = -EEXIST;
} else {
shp = shm_lock(id);
- if(shp==NULL)
- BUG();
+ BUG_ON(shp==NULL);
if (shp->shm_segsz < size)
err = -EINVAL;
else if (ipcperms(&shp->shm_perm, shmflg))
@@ -774,8 +773,8 @@ invalid:
up_write(&current->mm->mmap_sem);
mutex_lock(&shm_ids.mutex);
- if(!(shp = shm_lock(shmid)))
- BUG();
+ shp = shm_lock(shmid);
+ BUG_ON(!shp);
shp->shm_nattch--;
if(shp->shm_nattch == 0 &&
shp->shm_perm.mode & SHM_DEST)
diff --git a/ipc/util.c b/ipc/util.c
index 23151ef325903..5e785a29e1e63 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -266,8 +266,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
{
struct kern_ipc_perm* p;
int lid = id % SEQ_MULTIPLIER;
- if(lid >= ids->entries->size)
- BUG();
+ BUG_ON(lid >= ids->entries->size);
/*
* do not need a rcu_dereference()() here to force ordering
@@ -275,8 +274,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
*/
p = ids->entries->p[lid];
ids->entries->p[lid] = NULL;
- if(p==NULL)
- BUG();
+ BUG_ON(p==NULL);
ids->in_use--;
if (lid == ids->max_id) {
diff --git a/kernel/acct.c b/kernel/acct.c
index 065d8b4e51ef9..b327f4d201040 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -449,8 +449,8 @@ static void do_acct_process(long exitcode, struct file *file)
/* calculate run_time in nsec*/
do_posix_clock_monotonic_gettime(&uptime);
run_time = (u64)uptime.tv_sec*NSEC_PER_SEC + uptime.tv_nsec;
- run_time -= (u64)current->start_time.tv_sec*NSEC_PER_SEC
- + current->start_time.tv_nsec;
+ run_time -= (u64)current->group_leader->start_time.tv_sec * NSEC_PER_SEC
+ + current->group_leader->start_time.tv_nsec;
/* convert nsec -> AHZ */
elapsed = nsec_to_AHZ(run_time);
#if ACCT_VERSION==3
@@ -469,10 +469,10 @@ static void do_acct_process(long exitcode, struct file *file)
#endif
do_div(elapsed, AHZ);
ac.ac_btime = xtime.tv_sec - elapsed;
- jiffies = cputime_to_jiffies(cputime_add(current->group_leader->utime,
+ jiffies = cputime_to_jiffies(cputime_add(current->utime,
current->signal->utime));
ac.ac_utime = encode_comp_t(jiffies_to_AHZ(jiffies));
- jiffies = cputime_to_jiffies(cputime_add(current->group_leader->stime,
+ jiffies = cputime_to_jiffies(cputime_add(current->stime,
current->signal->stime));
ac.ac_stime = encode_comp_t(jiffies_to_AHZ(jiffies));
/* we really need to bite the bullet and change layout */
@@ -522,9 +522,9 @@ static void do_acct_process(long exitcode, struct file *file)
ac.ac_io = encode_comp_t(0 /* current->io_usage */); /* %% */
ac.ac_rw = encode_comp_t(ac.ac_io / 1024);
ac.ac_minflt = encode_comp_t(current->signal->min_flt +
- current->group_leader->min_flt);
+ current->min_flt);
ac.ac_majflt = encode_comp_t(current->signal->maj_flt +
- current->group_leader->maj_flt);
+ current->maj_flt);
ac.ac_swaps = encode_comp_t(0);
ac.ac_exitcode = exitcode;
diff --git a/kernel/audit.c b/kernel/audit.c
index 04fe2e301b611..c8ccbd09048f4 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -578,7 +578,7 @@ static int __init audit_enable(char *str)
audit_initialized ? "" : " (after initialization)");
if (audit_initialized)
audit_enabled = audit_default;
- return 0;
+ return 1;
}
__setup("audit=", audit_enable);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 18aea1bd12840..72248d1b9e3f7 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -616,12 +616,10 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
* current->cpuset if a task has its memory placement changed.
* Do not call this routine if in_interrupt().
*
- * Call without callback_mutex or task_lock() held. May be called
- * with or without manage_mutex held. Doesn't need task_lock to guard
- * against another task changing a non-NULL cpuset pointer to NULL,
- * as that is only done by a task on itself, and if the current task
- * is here, it is not simultaneously in the exit code NULL'ing its
- * cpuset pointer. This routine also might acquire callback_mutex and
+ * Call without callback_mutex or task_lock() held. May be
+ * called with or without manage_mutex held. Thanks in part to
+ * 'the_top_cpuset_hack', the tasks cpuset pointer will never
+ * be NULL. This routine also might acquire callback_mutex and
* current->mm->mmap_sem during call.
*
* Reading current->cpuset->mems_generation doesn't need task_lock
@@ -836,6 +834,55 @@ static int update_cpumask(struct cpuset *cs, char *buf)
}
/*
+ * cpuset_migrate_mm
+ *
+ * Migrate memory region from one set of nodes to another.
+ *
+ * Temporarily set the task's mems_allowed to the target nodes of migration,
+ * so that the migration code can allocate pages on these nodes.
+ *
+ * Call holding manage_mutex, so our current->cpuset won't change
+ * during this call, as manage_mutex holds off any attach_task()
+ * calls. Therefore we don't need to take task_lock around the
+ * call to guarantee_online_mems(), as we know no one is changing
+ * our tasks cpuset.
+ *
+ * Hold callback_mutex around the two modifications of our tasks
+ * mems_allowed to synchronize with cpuset_mems_allowed().
+ *
+ * While the mm_struct we are migrating is typically from some
+ * other task, the task_struct mems_allowed that we are hacking
+ * is for our current task, which must allocate new pages for that
+ * migrating memory region.
+ *
+ * We call cpuset_update_task_memory_state() before hacking
+ * our tasks mems_allowed, so that we are assured of being in
+ * sync with our tasks cpuset, and in particular, callbacks to
+ * cpuset_update_task_memory_state() from nested page allocations
+ * won't see any mismatch of our cpuset and task mems_generation
+ * values, so won't overwrite our hacked tasks mems_allowed
+ * nodemask.
+ */
+
+static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to)
+{
+ struct task_struct *tsk = current;
+
+ cpuset_update_task_memory_state();
+
+ mutex_lock(&callback_mutex);
+ tsk->mems_allowed = *to;
+ mutex_unlock(&callback_mutex);
+
+ do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
+
+ mutex_lock(&callback_mutex);
+ guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed);
+ mutex_unlock(&callback_mutex);
+}
+
+/*
* Handle user request to change the 'mems' memory placement
* of a cpuset. Needs to validate the request, update the
* cpusets mems_allowed and mems_generation, and for each
@@ -947,10 +994,8 @@ static int update_nodemask(struct cpuset *cs, char *buf)
struct mm_struct *mm = mmarray[i];
mpol_rebind_mm(mm, &cs->mems_allowed);
- if (migrate) {
- do_migrate_pages(mm, &oldmem, &cs->mems_allowed,
- MPOL_MF_MOVE_ALL);
- }
+ if (migrate)
+ cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed);
mmput(mm);
}
@@ -1185,11 +1230,11 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
mm = get_task_mm(tsk);
if (mm) {
mpol_rebind_mm(mm, &to);
+ if (is_memory_migrate(cs))
+ cpuset_migrate_mm(mm, &from, &to);
mmput(mm);
}
- if (is_memory_migrate(cs))
- do_migrate_pages(tsk->mm, &from, &to, MPOL_MF_MOVE_ALL);
put_task_struct(tsk);
synchronize_rcu();
if (atomic_dec_and_test(&oldcs->count))
diff --git a/kernel/exit.c b/kernel/exit.c
index bc0ec674d3f4c..6c2eeb8f6390c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -127,6 +127,11 @@ static void __exit_signal(struct task_struct *tsk)
}
}
+static void delayed_put_task_struct(struct rcu_head *rhp)
+{
+ put_task_struct(container_of(rhp, struct task_struct, rcu));
+}
+
void release_task(struct task_struct * p)
{
int zap_leader;
@@ -168,7 +173,7 @@ repeat:
spin_unlock(&p->proc_lock);
proc_pid_flush(proc_dentry);
release_thread(p);
- put_task_struct(p);
+ call_rcu(&p->rcu, delayed_put_task_struct);
p = leader;
if (unlikely(zap_leader))
diff --git a/kernel/fork.c b/kernel/fork.c
index b3f7a1bb5e558..3384eb89cb1c4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -108,10 +108,8 @@ void free_task(struct task_struct *tsk)
}
EXPORT_SYMBOL(free_task);
-void __put_task_struct_cb(struct rcu_head *rhp)
+void __put_task_struct(struct task_struct *tsk)
{
- struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
-
WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
@@ -126,6 +124,12 @@ void __put_task_struct_cb(struct rcu_head *rhp)
free_task(tsk);
}
+void __put_task_struct_cb(struct rcu_head *rhp)
+{
+ struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+ __put_task_struct(tsk);
+}
+
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
@@ -721,7 +725,7 @@ out_release:
free_fdset (new_fdt->open_fds, new_fdt->max_fdset);
free_fd_array(new_fdt->fd, new_fdt->max_fds);
kmem_cache_free(files_cachep, newf);
- goto out;
+ return NULL;
}
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
@@ -1311,17 +1315,19 @@ long do_fork(unsigned long clone_flags,
{
struct task_struct *p;
int trace = 0;
- long pid = alloc_pidmap();
+ struct pid *pid = alloc_pid();
+ long nr;
- if (pid < 0)
+ if (!pid)
return -EAGAIN;
+ nr = pid->nr;
if (unlikely(current->ptrace)) {
trace = fork_traceflag (clone_flags);
if (trace)
clone_flags |= CLONE_PTRACE;
}
- p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid);
+ p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
@@ -1348,7 +1354,7 @@ long do_fork(unsigned long clone_flags,
p->state = TASK_STOPPED;
if (unlikely (trace)) {
- current->ptrace_message = pid;
+ current->ptrace_message = nr;
ptrace_notify ((trace << 8) | SIGTRAP);
}
@@ -1358,10 +1364,10 @@ long do_fork(unsigned long clone_flags,
ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
}
} else {
- free_pidmap(pid);
- pid = PTR_ERR(p);
+ free_pid(pid);
+ nr = PTR_ERR(p);
}
- return pid;
+ return nr;
}
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
diff --git a/kernel/futex.c b/kernel/futex.c
index 9c9b2b6b22dd6..5699c512057b0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1039,9 +1039,11 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, int val,
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
int val2 = 0;
- if ((op == FUTEX_WAIT) && utime) {
+ if (utime && (op == FUTEX_WAIT)) {
if (copy_from_user(&t, utime, sizeof(t)) != 0)
return -EFAULT;
+ if (!timespec_valid(&t))
+ return -EINVAL;
timeout = timespec_to_jiffies(&t) + 1;
}
/*
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 54274fc853216..1ab6a0ea3d147 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -129,9 +129,11 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
int val2 = 0;
- if ((op == FUTEX_WAIT) && utime) {
+ if (utime && (op == FUTEX_WAIT)) {
if (get_compat_timespec(&t, utime))
return -EFAULT;
+ if (!timespec_valid(&t))
+ return -EINVAL;
timeout = timespec_to_jiffies(&t) + 1;
}
if (op >= FUTEX_REQUEUE)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 0237a556eb1f0..f181ff4dd32ec 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -606,6 +606,9 @@ static inline void run_hrtimer_queue(struct hrtimer_base *base)
{
struct rb_node *node;
+ if (!base->first)
+ return;
+
if (base->get_softirq_time)
base->softirq_time = base->get_softirq_time();
@@ -655,29 +658,28 @@ void hrtimer_run_queues(void)
/*
* Sleep related functions:
*/
-
-struct sleep_hrtimer {
- struct hrtimer timer;
- struct task_struct *task;
- int expired;
-};
-
-static int nanosleep_wakeup(struct hrtimer *timer)
+static int hrtimer_wakeup(struct hrtimer *timer)
{
- struct sleep_hrtimer *t =
- container_of(timer, struct sleep_hrtimer, timer);
+ struct hrtimer_sleeper *t =
+ container_of(timer, struct hrtimer_sleeper, timer);
+ struct task_struct *task = t->task;
- t->expired = 1;
- wake_up_process(t->task);
+ t->task = NULL;
+ if (task)
+ wake_up_process(task);
return HRTIMER_NORESTART;
}
-static int __sched do_nanosleep(struct sleep_hrtimer *t, enum hrtimer_mode mode)
+void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, task_t *task)
{
- t->timer.function = nanosleep_wakeup;
- t->task = current;
- t->expired = 0;
+ sl->timer.function = hrtimer_wakeup;
+ sl->task = task;
+}
+
+static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
+{
+ hrtimer_init_sleeper(t, current);
do {
set_current_state(TASK_INTERRUPTIBLE);
@@ -685,18 +687,17 @@ static int __sched do_nanosleep(struct sleep_hrtimer *t, enum hrtimer_mode mode)
schedule();
- if (unlikely(!t->expired)) {
- hrtimer_cancel(&t->timer);
- mode = HRTIMER_ABS;
- }
- } while (!t->expired && !signal_pending(current));
+ hrtimer_cancel(&t->timer);
+ mode = HRTIMER_ABS;
+
+ } while (t->task && !signal_pending(current));
- return t->expired;
+ return t->task == NULL;
}
static long __sched nanosleep_restart(struct restart_block *restart)
{
- struct sleep_hrtimer t;
+ struct hrtimer_sleeper t;
struct timespec __user *rmtp;
struct timespec tu;
ktime_t time;
@@ -729,7 +730,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
const enum hrtimer_mode mode, const clockid_t clockid)
{
struct restart_block *restart;
- struct sleep_hrtimer t;
+ struct hrtimer_sleeper t;
struct timespec tu;
ktime_t rem;
diff --git a/kernel/module.c b/kernel/module.c
index bd088a7c1499e..d24deb0dbbc94 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1254,6 +1254,7 @@ static inline int license_is_gpl_compatible(const char *license)
|| strcmp(license, "GPL v2") == 0
|| strcmp(license, "GPL and additional rights") == 0
|| strcmp(license, "Dual BSD/GPL") == 0
+ || strcmp(license, "Dual MIT/GPL") == 0
|| strcmp(license, "Dual MPL/GPL") == 0);
}
diff --git a/kernel/pid.c b/kernel/pid.c
index a9f2dfd006d2d..eeb836b65ca4e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -28,8 +28,9 @@
#include <linux/hash.h>
#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
-static struct hlist_head *pid_hash[PIDTYPE_MAX];
+static struct hlist_head *pid_hash;
static int pidhash_shift;
+static kmem_cache_t *pid_cachep;
int pid_max = PID_MAX_DEFAULT;
int last_pid;
@@ -60,9 +61,22 @@ typedef struct pidmap {
static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
{ [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };
+/*
+ * Note: disable interrupts while the pidmap_lock is held as an
+ * interrupt might come in and do read_lock(&tasklist_lock).
+ *
+ * If we don't disable interrupts there is a nasty deadlock between
+ * detach_pid()->free_pid() and another cpu that does
+ * spin_lock(&pidmap_lock) followed by an interrupt routine that does
+ * read_lock(&tasklist_lock);
+ *
+ * After we clean up the tasklist_lock and know there are no
+ * irq handlers that take it we can leave the interrupts enabled.
+ * For now it is easier to be safe than to prove it can't happen.
+ */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
-fastcall void free_pidmap(int pid)
+static fastcall void free_pidmap(int pid)
{
pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
int offset = pid & BITS_PER_PAGE_MASK;
@@ -71,7 +85,7 @@ fastcall void free_pidmap(int pid)
atomic_inc(&map->nr_free);
}
-int alloc_pidmap(void)
+static int alloc_pidmap(void)
{
int i, offset, max_scan, pid, last = last_pid;
pidmap_t *map;
@@ -89,12 +103,12 @@ int alloc_pidmap(void)
* Free the page if someone raced with us
* installing it:
*/
- spin_lock(&pidmap_lock);
+ spin_lock_irq(&pidmap_lock);
if (map->page)
free_page(page);
else
map->page = (void *)page;
- spin_unlock(&pidmap_lock);
+ spin_unlock_irq(&pidmap_lock);
if (unlikely(!map->page))
break;
}
@@ -131,13 +145,73 @@ int alloc_pidmap(void)
return -1;
}
-struct pid * fastcall find_pid(enum pid_type type, int nr)
+fastcall void put_pid(struct pid *pid)
+{
+ if (!pid)
+ return;
+ if ((atomic_read(&pid->count) == 1) ||
+ atomic_dec_and_test(&pid->count))
+ kmem_cache_free(pid_cachep, pid);
+}
+
+static void delayed_put_pid(struct rcu_head *rhp)
+{
+ struct pid *pid = container_of(rhp, struct pid, rcu);
+ put_pid(pid);
+}
+
+fastcall void free_pid(struct pid *pid)
+{
+ /* We can be called with write_lock_irq(&tasklist_lock) held */
+ unsigned long flags;
+
+ spin_lock_irqsave(&pidmap_lock, flags);
+ hlist_del_rcu(&pid->pid_chain);
+ spin_unlock_irqrestore(&pidmap_lock, flags);
+
+ free_pidmap(pid->nr);
+ call_rcu(&pid->rcu, delayed_put_pid);
+}
+
+struct pid *alloc_pid(void)
+{
+ struct pid *pid;
+ enum pid_type type;
+ int nr = -1;
+
+ pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL);
+ if (!pid)
+ goto out;
+
+ nr = alloc_pidmap();
+ if (nr < 0)
+ goto out_free;
+
+ atomic_set(&pid->count, 1);
+ pid->nr = nr;
+ for (type = 0; type < PIDTYPE_MAX; ++type)
+ INIT_HLIST_HEAD(&pid->tasks[type]);
+
+ spin_lock_irq(&pidmap_lock);
+ hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
+ spin_unlock_irq(&pidmap_lock);
+
+out:
+ return pid;
+
+out_free:
+ kmem_cache_free(pid_cachep, pid);
+ pid = NULL;
+ goto out;
+}
+
+struct pid * fastcall find_pid(int nr)
{
struct hlist_node *elem;
struct pid *pid;
hlist_for_each_entry_rcu(pid, elem,
- &pid_hash[type][pid_hashfn(nr)], pid_chain) {
+ &pid_hash[pid_hashfn(nr)], pid_chain) {
if (pid->nr == nr)
return pid;
}
@@ -146,77 +220,82 @@ struct pid * fastcall find_pid(enum pid_type type, int nr)
int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
{
- struct pid *pid, *task_pid;
-
- task_pid = &task->pids[type];
- pid = find_pid(type, nr);
- task_pid->nr = nr;
- if (pid == NULL) {
- INIT_LIST_HEAD(&task_pid->pid_list);
- hlist_add_head_rcu(&task_pid->pid_chain,
- &pid_hash[type][pid_hashfn(nr)]);
- } else {
- INIT_HLIST_NODE(&task_pid->pid_chain);
- list_add_tail_rcu(&task_pid->pid_list, &pid->pid_list);
- }
+ struct pid_link *link;
+ struct pid *pid;
+
+ WARN_ON(!task->pid); /* to be removed soon */
+ WARN_ON(!nr); /* to be removed soon */
+
+ link = &task->pids[type];
+ link->pid = pid = find_pid(nr);
+ hlist_add_head_rcu(&link->node, &pid->tasks[type]);
return 0;
}
-static fastcall int __detach_pid(task_t *task, enum pid_type type)
+void fastcall detach_pid(task_t *task, enum pid_type type)
{
- struct pid *pid, *pid_next;
- int nr = 0;
+ struct pid_link *link;
+ struct pid *pid;
+ int tmp;
- pid = &task->pids[type];
- if (!hlist_unhashed(&pid->pid_chain)) {
+ link = &task->pids[type];
+ pid = link->pid;
- if (list_empty(&pid->pid_list)) {
- nr = pid->nr;
- hlist_del_rcu(&pid->pid_chain);
- } else {
- pid_next = list_entry(pid->pid_list.next,
- struct pid, pid_list);
- /* insert next pid from pid_list to hash */
- hlist_replace_rcu(&pid->pid_chain,
- &pid_next->pid_chain);
- }
- }
+ hlist_del_rcu(&link->node);
+ link->pid = NULL;
- list_del_rcu(&pid->pid_list);
- pid->nr = 0;
+ for (tmp = PIDTYPE_MAX; --tmp >= 0; )
+ if (!hlist_empty(&pid->tasks[tmp]))
+ return;
- return nr;
+ free_pid(pid);
}
-void fastcall detach_pid(task_t *task, enum pid_type type)
+struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
- int tmp, nr;
+ struct task_struct *result = NULL;
+ if (pid) {
+ struct hlist_node *first;
+ first = rcu_dereference(pid->tasks[type].first);
+ if (first)
+ result = hlist_entry(first, struct task_struct, pids[(type)].node);
+ }
+ return result;
+}
- nr = __detach_pid(task, type);
- if (!nr)
- return;
+/*
+ * Must be called under rcu_read_lock() or with tasklist_lock read-held.
+ */
+task_t *find_task_by_pid_type(int type, int nr)
+{
+ return pid_task(find_pid(nr), type);
+}
- for (tmp = PIDTYPE_MAX; --tmp >= 0; )
- if (tmp != type && find_pid(tmp, nr))
- return;
+EXPORT_SYMBOL(find_task_by_pid_type);
- free_pidmap(nr);
+struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
+{
+ struct task_struct *result;
+ rcu_read_lock();
+ result = pid_task(pid, type);
+ if (result)
+ get_task_struct(result);
+ rcu_read_unlock();
+ return result;
}
-task_t *find_task_by_pid_type(int type, int nr)
+struct pid *find_get_pid(pid_t nr)
{
struct pid *pid;
- pid = find_pid(type, nr);
- if (!pid)
- return NULL;
+ rcu_read_lock();
+ pid = get_pid(find_pid(nr));
+ rcu_read_unlock();
- return pid_task(&pid->pid_list, type);
+ return pid;
}
-EXPORT_SYMBOL(find_task_by_pid_type);
-
/*
* The pid hash table is scaled according to the amount of memory in the
* machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
@@ -224,7 +303,7 @@ EXPORT_SYMBOL(find_task_by_pid_type);
*/
void __init pidhash_init(void)
{
- int i, j, pidhash_size;
+ int i, pidhash_size;
unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);
pidhash_shift = max(4, fls(megabytes * 4));
@@ -233,16 +312,13 @@ void __init pidhash_init(void)
printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
pidhash_size, pidhash_shift,
- PIDTYPE_MAX * pidhash_size * sizeof(struct hlist_head));
-
- for (i = 0; i < PIDTYPE_MAX; i++) {
- pid_hash[i] = alloc_bootmem(pidhash_size *
- sizeof(*(pid_hash[i])));
- if (!pid_hash[i])
- panic("Could not alloc pidhash!\n");
- for (j = 0; j < pidhash_size; j++)
- INIT_HLIST_HEAD(&pid_hash[i][j]);
- }
+ pidhash_size * sizeof(struct hlist_head));
+
+ pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
+ if (!pid_hash)
+ panic("Could not alloc pidhash!\n");
+ for (i = 0; i < pidhash_size; i++)
+ INIT_HLIST_HEAD(&pid_hash[i]);
}
void __init pidmap_init(void)
@@ -251,4 +327,8 @@ void __init pidmap_init(void)
/* Reserve PID 0. We never call free_pidmap(0) */
set_bit(0, pidmap_array->page);
atomic_dec(&pidmap_array->nr_free);
+
+ pid_cachep = kmem_cache_create("pid", sizeof(struct pid),
+ __alignof__(struct pid),
+ SLAB_PANIC, NULL, NULL);
}
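The reworked struct pid is reference counted and looked up under RCU, so code that holds on to a pid or its task beyond the lookup follows a get/put pattern built from the helpers added here. A minimal hedged sketch (the function itself is hypothetical):

static int pid_is_alive(pid_t nr)
{
        struct pid *pid;
        struct task_struct *task;
        int alive = 0;

        pid = find_get_pid(nr);                 /* takes a reference, may return NULL */
        task = get_pid_task(pid, PIDTYPE_PID);  /* a NULL pid is handled */
        if (task) {
                alive = 1;
                put_task_struct(task);          /* get_pid_task took a task reference */
        }
        put_pid(pid);                           /* put_pid(NULL) is a no-op */
        return alive;
}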
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 9fd8d4f035951..ce0dfb8f4a4ec 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -41,7 +41,7 @@ config SOFTWARE_SUSPEND
depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
---help---
Enable the possibility of suspending the machine.
- It doesn't need APM.
+ It doesn't need ACPI or APM.
You may suspend your machine by 'swsusp' or 'shutdown -z <time>'
(patch for sysvinit needed).
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 8ac7c35fad770..b2a5f671d6cd3 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -26,8 +26,7 @@ static inline int freezeable(struct task_struct * p)
(p->flags & PF_NOFREEZE) ||
(p->exit_state == EXIT_ZOMBIE) ||
(p->exit_state == EXIT_DEAD) ||
- (p->state == TASK_STOPPED) ||
- (p->state == TASK_TRACED))
+ (p->state == TASK_STOPPED))
return 0;
return 1;
}
diff --git a/kernel/printk.c b/kernel/printk.c
index 8cc19431e74ba..c056f3324432c 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -360,8 +360,7 @@ static void call_console_drivers(unsigned long start, unsigned long end)
unsigned long cur_index, start_print;
static int msg_level = -1;
- if (((long)(start - end)) > 0)
- BUG();
+ BUG_ON(((long)(start - end)) > 0);
cur_index = start;
start_print = start;
@@ -708,8 +707,7 @@ int __init add_preferred_console(char *name, int idx, char *options)
*/
void acquire_console_sem(void)
{
- if (in_interrupt())
- BUG();
+ BUG_ON(in_interrupt());
down(&console_sem);
console_locked = 1;
console_may_schedule = 1;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 86a7f6c60cb2f..0eeb7e66722c8 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -30,8 +30,7 @@
*/
void __ptrace_link(task_t *child, task_t *new_parent)
{
- if (!list_empty(&child->ptrace_list))
- BUG();
+ BUG_ON(!list_empty(&child->ptrace_list));
if (child->parent == new_parent)
return;
list_add(&child->ptrace_list, &child->parent->ptrace_children);
diff --git a/kernel/sched.c b/kernel/sched.c
index a9ecac398bb9b..dd153d6f8a04b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -667,9 +667,13 @@ static int effective_prio(task_t *p)
/*
* __activate_task - move a task to the runqueue.
*/
-static inline void __activate_task(task_t *p, runqueue_t *rq)
+static void __activate_task(task_t *p, runqueue_t *rq)
{
- enqueue_task(p, rq->active);
+ prio_array_t *target = rq->active;
+
+ if (batch_task(p))
+ target = rq->expired;
+ enqueue_task(p, target);
rq->nr_running++;
}
@@ -688,7 +692,7 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
unsigned long long __sleep_time = now - p->timestamp;
unsigned long sleep_time;
- if (unlikely(p->policy == SCHED_BATCH))
+ if (batch_task(p))
sleep_time = 0;
else {
if (__sleep_time > NS_MAX_SLEEP_AVG)
@@ -700,21 +704,25 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
if (likely(sleep_time > 0)) {
/*
* User tasks that sleep a long time are categorised as
- * idle and will get just interactive status to stay active &
- * prevent them suddenly becoming cpu hogs and starving
- * other processes.
+ * idle. They will only have their sleep_avg increased to a
+ * level that makes them just interactive priority to stay
+ * active yet prevent them suddenly becoming cpu hogs and
+ * starving other processes.
*/
- if (p->mm && p->activated != -1 &&
- sleep_time > INTERACTIVE_SLEEP(p)) {
- p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
- DEF_TIMESLICE);
+ if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) {
+ unsigned long ceiling;
+
+ ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG -
+ DEF_TIMESLICE);
+ if (p->sleep_avg < ceiling)
+ p->sleep_avg = ceiling;
} else {
/*
* Tasks waking from uninterruptible sleep are
* limited in their sleep_avg rise as they
* are likely to be waiting on I/O
*/
- if (p->activated == -1 && p->mm) {
+ if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
sleep_time = 0;
else if (p->sleep_avg + sleep_time >=
@@ -769,7 +777,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
* This checks to make sure it's not an uninterruptible task
* that is now waking up.
*/
- if (!p->activated) {
+ if (p->sleep_type == SLEEP_NORMAL) {
/*
* Tasks which were woken up by interrupts (ie. hw events)
* are most likely of interactive nature. So we give them
@@ -778,13 +786,13 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
* on a CPU, first time around:
*/
if (in_interrupt())
- p->activated = 2;
+ p->sleep_type = SLEEP_INTERRUPTED;
else {
/*
* Normal first-time wakeups get a credit too for
* on-runqueue time, but it will be weighted down:
*/
- p->activated = 1;
+ p->sleep_type = SLEEP_INTERACTIVE;
}
}
p->timestamp = now;
@@ -1272,19 +1280,19 @@ out_activate:
* Tasks on involuntary sleep don't earn
* sleep_avg beyond just interactive state.
*/
- p->activated = -1;
- }
+ p->sleep_type = SLEEP_NONINTERACTIVE;
+ } else
/*
* Tasks that have marked their sleep as noninteractive get
- * woken up without updating their sleep average. (i.e. their
- * sleep is handled in a priority-neutral manner, no priority
- * boost and no penalty.)
+ * woken up with their sleep average not weighted in an
+ * interactive way.
*/
- if (old_state & TASK_NONINTERACTIVE)
- __activate_task(p, rq);
- else
- activate_task(p, rq, cpu == this_cpu);
+ if (old_state & TASK_NONINTERACTIVE)
+ p->sleep_type = SLEEP_NONINTERACTIVE;
+
+
+ activate_task(p, rq, cpu == this_cpu);
/*
* Sync wakeups (i.e. those types of wakeups where the waker
* has indicated that it will leave the CPU in short order)
@@ -1658,6 +1666,21 @@ unsigned long nr_iowait(void)
return sum;
}
+unsigned long nr_active(void)
+{
+ unsigned long i, running = 0, uninterruptible = 0;
+
+ for_each_online_cpu(i) {
+ running += cpu_rq(i)->nr_running;
+ uninterruptible += cpu_rq(i)->nr_uninterruptible;
+ }
+
+ if (unlikely((long)uninterruptible < 0))
+ uninterruptible = 0;
+
+ return running + uninterruptible;
+}
+
#ifdef CONFIG_SMP
/*
@@ -2860,6 +2883,12 @@ EXPORT_SYMBOL(sub_preempt_count);
#endif
+static inline int interactive_sleep(enum sleep_type sleep_type)
+{
+ return (sleep_type == SLEEP_INTERACTIVE ||
+ sleep_type == SLEEP_INTERRUPTED);
+}
+
/*
* schedule() is the main scheduler function.
*/
@@ -2983,12 +3012,12 @@ go_idle:
queue = array->queue + idx;
next = list_entry(queue->next, task_t, run_list);
- if (!rt_task(next) && next->activated > 0) {
+ if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
unsigned long long delta = now - next->timestamp;
if (unlikely((long long)(now - next->timestamp) < 0))
delta = 0;
- if (next->activated == 1)
+ if (next->sleep_type == SLEEP_INTERACTIVE)
delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
array = next->array;
@@ -2998,10 +3027,9 @@ go_idle:
dequeue_task(next, array);
next->prio = new_prio;
enqueue_task(next, array);
- } else
- requeue_task(next, array);
+ }
}
- next->activated = 0;
+ next->sleep_type = SLEEP_NORMAL;
switch_tasks:
if (next == rq->idle)
schedstat_inc(rq, sched_goidle);
diff --git a/kernel/signal.c b/kernel/signal.c
index 4922928d91f68..5ccaac505e8da 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -769,8 +769,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
int ret = 0;
- if (!irqs_disabled())
- BUG();
+ BUG_ON(!irqs_disabled());
assert_spin_locked(&t->sighand->siglock);
/* Short-circuit ignored signals. */
@@ -1384,8 +1383,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
* the overrun count. Other uses should not try to
* send the signal multiple times.
*/
- if (q->info.si_code != SI_TIMER)
- BUG();
+ BUG_ON(q->info.si_code != SI_TIMER);
q->info.si_overrun++;
goto out;
}
@@ -1560,6 +1558,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
/* Let the debugger run. */
set_current_state(TASK_TRACED);
spin_unlock_irq(&current->sighand->siglock);
+ try_to_freeze();
read_lock(&tasklist_lock);
if (likely(current->ptrace & PT_PTRACED) &&
likely(current->parent != current->real_parent ||
diff --git a/kernel/sys.c b/kernel/sys.c
index 7ef7f6054c285..0b6ec0e7936f0 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1372,18 +1372,29 @@ asmlinkage long sys_getsid(pid_t pid)
asmlinkage long sys_setsid(void)
{
struct task_struct *group_leader = current->group_leader;
- struct pid *pid;
+ pid_t session;
int err = -EPERM;
mutex_lock(&tty_mutex);
write_lock_irq(&tasklist_lock);
- pid = find_pid(PIDTYPE_PGID, group_leader->pid);
- if (pid)
+ /* Fail if I am already a session leader */
+ if (group_leader->signal->leader)
+ goto out;
+
+ session = group_leader->pid;
+ /* Fail if a process group id already exists that equals the
+ * proposed session id.
+ *
+ * Don't check if session id == 1 because kernel threads use this
+ * session id and so the check will always fail and make it so
+ * init cannot successfully call setsid.
+ */
+ if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
goto out;
group_leader->signal->leader = 1;
- __set_special_pids(group_leader->pid, group_leader->pid);
+ __set_special_pids(session, session);
group_leader->signal->tty = NULL;
group_leader->signal->tty_old_pgrp = 0;
err = process_group(group_leader);
diff --git a/kernel/time.c b/kernel/time.c
index ff8e7019c4c49..b00ddc71cedb8 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -410,7 +410,7 @@ EXPORT_SYMBOL(current_kernel_time);
* current_fs_time - Return FS time
* @sb: Superblock.
*
- * Return the current time truncated to the time granuality supported by
+ * Return the current time truncated to the time granularity supported by
* the fs.
*/
struct timespec current_fs_time(struct super_block *sb)
@@ -421,11 +421,11 @@ struct timespec current_fs_time(struct super_block *sb)
EXPORT_SYMBOL(current_fs_time);
/**
- * timespec_trunc - Truncate timespec to a granuality
+ * timespec_trunc - Truncate timespec to a granularity
* @t: Timespec
- * @gran: Granuality in ns.
+ * @gran: Granularity in ns.
*
- * Truncate a timespec to a granuality. gran must be smaller than a second.
+ * Truncate a timespec to a granularity. gran must be smaller than a second.
* Always rounds down.
*
* This function should be only used for timestamps returned by
diff --git a/kernel/timer.c b/kernel/timer.c
index ab189dd187cb8..c3a874f1393cd 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -54,7 +54,6 @@ EXPORT_SYMBOL(jiffies_64);
/*
* per-CPU timer vector definitions:
*/
-
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
@@ -62,11 +61,6 @@ EXPORT_SYMBOL(jiffies_64);
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
-struct timer_base_s {
- spinlock_t lock;
- struct timer_list *running_timer;
-};
-
typedef struct tvec_s {
struct list_head vec[TVN_SIZE];
} tvec_t;
@@ -76,7 +70,8 @@ typedef struct tvec_root_s {
} tvec_root_t;
struct tvec_t_base_s {
- struct timer_base_s t_base;
+ spinlock_t lock;
+ struct timer_list *running_timer;
unsigned long timer_jiffies;
tvec_root_t tv1;
tvec_t tv2;
@@ -87,13 +82,14 @@ struct tvec_t_base_s {
typedef struct tvec_t_base_s tvec_base_t;
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
-static tvec_base_t boot_tvec_bases;
+tvec_base_t boot_tvec_bases;
+EXPORT_SYMBOL(boot_tvec_bases);
static inline void set_running_timer(tvec_base_t *base,
struct timer_list *timer)
{
#ifdef CONFIG_SMP
- base->t_base.running_timer = timer;
+ base->running_timer = timer;
#endif
}
@@ -139,15 +135,6 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
list_add_tail(&timer->entry, vec);
}
-typedef struct timer_base_s timer_base_t;
-/*
- * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases)
- * at compile time, and we need timer->base to lock the timer.
- */
-timer_base_t __init_timer_base
- ____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED };
-EXPORT_SYMBOL(__init_timer_base);
-
/***
* init_timer - initialize a timer.
* @timer: the timer to be initialized
@@ -158,7 +145,7 @@ EXPORT_SYMBOL(__init_timer_base);
void fastcall init_timer(struct timer_list *timer)
{
timer->entry.next = NULL;
- timer->base = &per_cpu(tvec_bases, raw_smp_processor_id())->t_base;
+ timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
}
EXPORT_SYMBOL(init_timer);
@@ -174,7 +161,7 @@ static inline void detach_timer(struct timer_list *timer,
}
/*
- * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock
+ * We are using hashed locking: holding per_cpu(tvec_bases).lock
* means that all timers which are tied to this base via timer->base are
* locked, and the base itself is locked too.
*
@@ -185,10 +172,10 @@ static inline void detach_timer(struct timer_list *timer,
* possible to set timer->base = NULL and drop the lock: the timer remains
* locked.
*/
-static timer_base_t *lock_timer_base(struct timer_list *timer,
+static tvec_base_t *lock_timer_base(struct timer_list *timer,
unsigned long *flags)
{
- timer_base_t *base;
+ tvec_base_t *base;
for (;;) {
base = timer->base;
@@ -205,8 +192,7 @@ static timer_base_t *lock_timer_base(struct timer_list *timer,
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
- timer_base_t *base;
- tvec_base_t *new_base;
+ tvec_base_t *base, *new_base;
unsigned long flags;
int ret = 0;
@@ -221,7 +207,7 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
new_base = __get_cpu_var(tvec_bases);
- if (base != &new_base->t_base) {
+ if (base != new_base) {
/*
* We are trying to schedule the timer on the local CPU.
* However we can't change timer's base while it is running,
@@ -229,21 +215,19 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
* handler yet has not finished. This also guarantees that
* the timer is serialized wrt itself.
*/
- if (unlikely(base->running_timer == timer)) {
- /* The timer remains on a former base */
- new_base = container_of(base, tvec_base_t, t_base);
- } else {
+ if (likely(base->running_timer != timer)) {
/* See the comment in lock_timer_base() */
timer->base = NULL;
spin_unlock(&base->lock);
- spin_lock(&new_base->t_base.lock);
- timer->base = &new_base->t_base;
+ base = new_base;
+ spin_lock(&base->lock);
+ timer->base = base;
}
}
timer->expires = expires;
- internal_add_timer(new_base, timer);
- spin_unlock_irqrestore(&new_base->t_base.lock, flags);
+ internal_add_timer(base, timer);
+ spin_unlock_irqrestore(&base->lock, flags);
return ret;
}
@@ -263,10 +247,10 @@ void add_timer_on(struct timer_list *timer, int cpu)
unsigned long flags;
BUG_ON(timer_pending(timer) || !timer->function);
- spin_lock_irqsave(&base->t_base.lock, flags);
- timer->base = &base->t_base;
+ spin_lock_irqsave(&base->lock, flags);
+ timer->base = base;
internal_add_timer(base, timer);
- spin_unlock_irqrestore(&base->t_base.lock, flags);
+ spin_unlock_irqrestore(&base->lock, flags);
}
@@ -319,7 +303,7 @@ EXPORT_SYMBOL(mod_timer);
*/
int del_timer(struct timer_list *timer)
{
- timer_base_t *base;
+ tvec_base_t *base;
unsigned long flags;
int ret = 0;
@@ -346,7 +330,7 @@ EXPORT_SYMBOL(del_timer);
*/
int try_to_del_timer_sync(struct timer_list *timer)
{
- timer_base_t *base;
+ tvec_base_t *base;
unsigned long flags;
int ret = -1;
@@ -410,7 +394,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index)
struct timer_list *tmp;
tmp = list_entry(curr, struct timer_list, entry);
- BUG_ON(tmp->base != &base->t_base);
+ BUG_ON(tmp->base != base);
curr = curr->next;
internal_add_timer(base, tmp);
}
@@ -432,7 +416,7 @@ static inline void __run_timers(tvec_base_t *base)
{
struct timer_list *timer;
- spin_lock_irq(&base->t_base.lock);
+ spin_lock_irq(&base->lock);
while (time_after_eq(jiffies, base->timer_jiffies)) {
struct list_head work_list = LIST_HEAD_INIT(work_list);
struct list_head *head = &work_list;
@@ -458,7 +442,7 @@ static inline void __run_timers(tvec_base_t *base)
set_running_timer(base, timer);
detach_timer(timer, 1);
- spin_unlock_irq(&base->t_base.lock);
+ spin_unlock_irq(&base->lock);
{
int preempt_count = preempt_count();
fn(data);
@@ -471,11 +455,11 @@ static inline void __run_timers(tvec_base_t *base)
BUG();
}
}
- spin_lock_irq(&base->t_base.lock);
+ spin_lock_irq(&base->lock);
}
}
set_running_timer(base, NULL);
- spin_unlock_irq(&base->t_base.lock);
+ spin_unlock_irq(&base->lock);
}
#ifdef CONFIG_NO_IDLE_HZ
@@ -506,7 +490,7 @@ unsigned long next_timer_interrupt(void)
hr_expires += jiffies;
base = __get_cpu_var(tvec_bases);
- spin_lock(&base->t_base.lock);
+ spin_lock(&base->lock);
expires = base->timer_jiffies + (LONG_MAX >> 1);
list = NULL;
@@ -554,7 +538,7 @@ found:
expires = nte->expires;
}
}
- spin_unlock(&base->t_base.lock);
+ spin_unlock(&base->lock);
if (time_before(hr_expires, expires))
return hr_expires;
@@ -841,7 +825,7 @@ void update_process_times(int user_tick)
*/
static unsigned long count_active_tasks(void)
{
- return (nr_running() + nr_uninterruptible()) * FIXED_1;
+ return nr_active() * FIXED_1;
}
/*
@@ -1262,7 +1246,7 @@ static int __devinit init_timers_cpu(int cpu)
}
per_cpu(tvec_bases, cpu) = base;
}
- spin_lock_init(&base->t_base.lock);
+ spin_lock_init(&base->lock);
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
INIT_LIST_HEAD(base->tv4.vec + j);
@@ -1284,7 +1268,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
while (!list_empty(head)) {
timer = list_entry(head->next, struct timer_list, entry);
detach_timer(timer, 0);
- timer->base = &new_base->t_base;
+ timer->base = new_base;
internal_add_timer(new_base, timer);
}
}
@@ -1300,11 +1284,11 @@ static void __devinit migrate_timers(int cpu)
new_base = get_cpu_var(tvec_bases);
local_irq_disable();
- spin_lock(&new_base->t_base.lock);
- spin_lock(&old_base->t_base.lock);
+ spin_lock(&new_base->lock);
+ spin_lock(&old_base->lock);
+
+ BUG_ON(old_base->running_timer);
- if (old_base->t_base.running_timer)
- BUG();
for (i = 0; i < TVR_SIZE; i++)
migrate_timer_list(new_base, old_base->tv1.vec + i);
for (i = 0; i < TVN_SIZE; i++) {
@@ -1314,8 +1298,8 @@ static void __devinit migrate_timers(int cpu)
migrate_timer_list(new_base, old_base->tv5.vec + i);
}
- spin_unlock(&old_base->t_base.lock);
- spin_unlock(&new_base->t_base.lock);
+ spin_unlock(&old_base->lock);
+ spin_unlock(&new_base->lock);
local_irq_enable();
put_cpu_var(tvec_bases);
}
@@ -1495,8 +1479,7 @@ register_time_interpolator(struct time_interpolator *ti)
unsigned long flags;
/* Sanity check */
- if (ti->frequency == 0 || ti->mask == 0)
- BUG();
+ BUG_ON(ti->frequency == 0 || ti->mask == 0);
ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
spin_lock(&time_interpolator_lock);
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 907c39257ca06..0a03357a1f8ed 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -35,17 +35,6 @@
*
* LINUX_FADV_ASYNC_WRITE: push some or all of the dirty pages at the disk.
*
- * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE: push all of the currently
- * dirty pages at the disk.
- *
- * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE, LINUX_FADV_WRITE_WAIT: push
- * all of the currently dirty pages at the disk, wait until they have been
- * written.
- *
- * It should be noted that none of these operations write out the file's
- * metadata. So unless the application is strictly performing overwrites of
- * already-instantiated disk blocks, there are no guarantees here that the data
- * will be available after a crash.
*/
asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
{
@@ -129,15 +118,6 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
invalidate_mapping_pages(mapping, start_index,
end_index);
break;
- case LINUX_FADV_ASYNC_WRITE:
- ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
- WB_SYNC_NONE);
- break;
- case LINUX_FADV_WRITE_WAIT:
- ret = wait_on_page_writeback_range(mapping,
- offset >> PAGE_CACHE_SHIFT,
- endbyte >> PAGE_CACHE_SHIFT);
- break;
default:
ret = -EINVAL;
}
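
With the experimental LINUX_FADV_ASYNC_WRITE and LINUX_FADV_WRITE_WAIT cases removed above, sys_fadvise64_64() is left handling the standard POSIX advice values. For context, a small userspace sketch of how the syscall is normally reached through the glibc posix_fadvise() wrapper (the file path is invented):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/example.dat", O_RDONLY);	/* hypothetical file */
	int err;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Ask the kernel to drop cached pages for the whole file. */
	err = posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
	if (err)
		fprintf(stderr, "posix_fadvise: %s\n", strerror(err));
	close(fd);
	return 0;
}
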
diff --git a/mm/highmem.c b/mm/highmem.c
index 55885f64af406..9b274fdf9d08d 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -74,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
pkmap_count[i] = 0;
/* sanity check */
- if (pte_none(pkmap_page_table[i]))
- BUG();
+ BUG_ON(pte_none(pkmap_page_table[i]));
/*
* Don't need an atomic fetch-and-clear op here;
@@ -158,8 +157,7 @@ void fastcall *kmap_high(struct page *page)
if (!vaddr)
vaddr = map_new_virtual(page);
pkmap_count[PKMAP_NR(vaddr)]++;
- if (pkmap_count[PKMAP_NR(vaddr)] < 2)
- BUG();
+ BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
spin_unlock(&kmap_lock);
return (void*) vaddr;
}
@@ -174,8 +172,7 @@ void fastcall kunmap_high(struct page *page)
spin_lock(&kmap_lock);
vaddr = (unsigned long)page_address(page);
- if (!vaddr)
- BUG();
+ BUG_ON(!vaddr);
nr = PKMAP_NR(vaddr);
/*
@@ -220,8 +217,7 @@ static __init int init_emergency_pool(void)
return 0;
page_pool = mempool_create_page_pool(POOL_SIZE, 0);
- if (!page_pool)
- BUG();
+ BUG_ON(!page_pool);
printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
return 0;
@@ -264,8 +260,7 @@ int init_emergency_isa_pool(void)
isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
mempool_free_pages, (void *) 0);
- if (!isa_page_pool)
- BUG();
+ BUG_ON(!isa_page_pool);
printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
return 0;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ebad6bbb35012..832f676ca0388 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -334,6 +334,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
return nr_huge_pages;
spin_lock(&hugetlb_lock);
+ count = max(count, reserved_huge_pages);
try_to_free_low(count);
while (count < nr_huge_pages) {
struct page *page = dequeue_huge_page(NULL, 0);
@@ -697,9 +698,10 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
page = pte_page(*pte);
same_page:
- get_page(page);
- if (pages)
+ if (pages) {
+ get_page(page);
pages[i] = page + pfn_offset;
+ }
if (vmas)
vmas[i] = vma;
diff --git a/mm/memory.c b/mm/memory.c
index 8d8f52569f328..0ec7bc644271c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -87,7 +87,7 @@ int randomize_va_space __read_mostly = 1;
static int __init disable_randmaps(char *s)
{
randomize_va_space = 0;
- return 0;
+ return 1;
}
__setup("norandmaps", disable_randmaps);
diff --git a/mm/mmap.c b/mm/mmap.c
index 4f5b5709136ab..e780d19aa2144 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -294,8 +294,7 @@ void validate_mm(struct mm_struct *mm)
i = browse_rb(&mm->mm_rb);
if (i != mm->map_count)
printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
- if (bug)
- BUG();
+ BUG_ON(bug);
}
#else
#define validate_mm(mm) do { } while (0)
@@ -432,8 +431,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
struct rb_node ** rb_link, * rb_parent;
__vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
- if (__vma && __vma->vm_start < vma->vm_end)
- BUG();
+ BUG_ON(__vma && __vma->vm_start < vma->vm_end);
__vma_link(mm, vma, prev, rb_link, rb_parent);
mm->map_count++;
}
@@ -813,8 +811,7 @@ try_prev:
* (e.g. stash info in next's anon_vma_node when assigning
* an anon_vma, or when trying vma_merge). Another time.
*/
- if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
- BUG();
+ BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
if (!near)
goto none;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 893d7677579ec..6dcce3a4bbdc6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -258,7 +258,7 @@ static void balance_dirty_pages(struct address_space *mapping)
/**
* balance_dirty_pages_ratelimited_nr - balance dirty memory state
* @mapping: address_space which was dirtied
- * @nr_pages: number of pages which the caller has just dirtied
+ * @nr_pages_dirtied: number of pages which the caller has just dirtied
*
* Processes which are dirtying memory should call in here once for each page
* which was newly dirtied. The function will periodically check the system's
diff --git a/mm/slab.c b/mm/slab.c
index 4cbf8bb135571..f055c14202161 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1297,8 +1297,7 @@ void __init kmem_cache_init(void)
if (cache_cache.num)
break;
}
- if (!cache_cache.num)
- BUG();
+ BUG_ON(!cache_cache.num);
cache_cache.gfporder = order;
cache_cache.colour = left_over / cache_cache.colour_off;
cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
@@ -1974,8 +1973,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* Always checks flags, a caller might be expecting debug support which
* isn't available.
*/
- if (flags & ~CREATE_MASK)
- BUG();
+ BUG_ON(flags & ~CREATE_MASK);
/*
* Check that size is in terms of words. This is needed to avoid
@@ -2206,8 +2204,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
slabp = list_entry(l3->slabs_free.prev, struct slab, list);
#if DEBUG
- if (slabp->inuse)
- BUG();
+ BUG_ON(slabp->inuse);
#endif
list_del(&slabp->list);
@@ -2248,8 +2245,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
*/
int kmem_cache_shrink(struct kmem_cache *cachep)
{
- if (!cachep || in_interrupt())
- BUG();
+ BUG_ON(!cachep || in_interrupt());
return __cache_shrink(cachep);
}
@@ -2277,8 +2273,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
int i;
struct kmem_list3 *l3;
- if (!cachep || in_interrupt())
- BUG();
+ BUG_ON(!cachep || in_interrupt());
/* Don't let CPUs to come and go */
lock_cpu_hotplug();
@@ -2477,8 +2472,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
* Be lazy and only check for valid flags here, keeping it out of the
* critical path in kmem_cache_alloc().
*/
- if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
- BUG();
+ BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
if (flags & SLAB_NO_GROW)
return 0;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d7af296833fcb..e0e1583f32c26 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -148,8 +148,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
swp_entry_t entry;
int err;
- if (!PageLocked(page))
- BUG();
+ BUG_ON(!PageLocked(page));
for (;;) {
entry = get_swap_page();
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 39aa9d1296120..e5fd5385f0cc1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -397,18 +397,24 @@ void free_swap_and_cache(swp_entry_t entry)
p = swap_info_get(entry);
if (p) {
- if (swap_entry_free(p, swp_offset(entry)) == 1)
- page = find_trylock_page(&swapper_space, entry.val);
+ if (swap_entry_free(p, swp_offset(entry)) == 1) {
+ page = find_get_page(&swapper_space, entry.val);
+ if (page && unlikely(TestSetPageLocked(page))) {
+ page_cache_release(page);
+ page = NULL;
+ }
+ }
spin_unlock(&swap_lock);
}
if (page) {
int one_user;
BUG_ON(PagePrivate(page));
- page_cache_get(page);
one_user = (page_count(page) == 2);
/* Only cache user (+us), or swap space full? Free it! */
- if (!PageWriteback(page) && (one_user || vm_swap_full())) {
+ /* Also recheck PageSwapCache after page is locked (above) */
+ if (PageSwapCache(page) && !PageWriteback(page) &&
+ (one_user || vm_swap_full())) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
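
The swapfile change above replaces find_trylock_page() with find_get_page() plus a trylock, and then re-tests PageSwapCache() once the page lock is held, since the page can leave the swap cache while it is unlocked. A userspace analogue of that reference/trylock/re-check pattern, with all names invented (this is a sketch of the locking discipline, not of the kernel code itself):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cached_obj {
	atomic_int refcount;
	pthread_mutex_t lock;
	bool still_cached;		/* may be cleared by another thread */
};

static void put_obj(struct cached_obj *obj)
{
	atomic_fetch_sub(&obj->refcount, 1);
}

static bool try_release(struct cached_obj *obj)
{
	bool released = false;

	atomic_fetch_add(&obj->refcount, 1);		/* cf. find_get_page() */
	if (pthread_mutex_trylock(&obj->lock) != 0) {	/* cf. TestSetPageLocked() */
		put_obj(obj);				/* contended: back off */
		return false;
	}
	/* Re-check under the lock: the state may have changed while the
	 * object was unlocked (cf. the PageSwapCache() re-test above). */
	if (obj->still_cached) {
		obj->still_cached = false;
		released = true;
	}
	pthread_mutex_unlock(&obj->lock);
	put_obj(obj);
	return released;
}

int main(void)
{
	struct cached_obj obj;

	atomic_init(&obj.refcount, 1);
	pthread_mutex_init(&obj.lock, NULL);
	obj.still_cached = true;
	printf("released: %d\n", try_release(&obj));
	pthread_mutex_destroy(&obj.lock);
	return 0;
}
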
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 729eb3eec75fd..c0504f1e34ebd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -321,8 +321,7 @@ void __vunmap(void *addr, int deallocate_pages)
int i;
for (i = 0; i < area->nr_pages; i++) {
- if (unlikely(!area->pages[i]))
- BUG();
+ BUG_ON(!area->pages[i]);
__free_page(area->pages[i]);
}
diff --git a/net/compat.c b/net/compat.c
index 8fd37cd7b5013..d5d69fa15d07a 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -476,8 +476,7 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
int err;
struct socket *sock;
- /* SO_SET_REPLACE seems to be the same in all levels */
- if (optname == IPT_SO_SET_REPLACE)
+ if (level == SOL_IPV6 && optname == IPT_SO_SET_REPLACE)
return do_netfilter_replace(fd, level, optname,
optval, optlen);
diff --git a/net/core/dev.c b/net/core/dev.c
index a3ab11f34153a..434220d093aae 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1080,6 +1080,70 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
rcu_read_unlock();
}
+
+void __netif_schedule(struct net_device *dev)
+{
+ if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
+ unsigned long flags;
+ struct softnet_data *sd;
+
+ local_irq_save(flags);
+ sd = &__get_cpu_var(softnet_data);
+ dev->next_sched = sd->output_queue;
+ sd->output_queue = dev;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL(__netif_schedule);
+
+void __netif_rx_schedule(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ dev_hold(dev);
+ list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
+ if (dev->quota < 0)
+ dev->quota += dev->weight;
+ else
+ dev->quota = dev->weight;
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__netif_rx_schedule);
+
+void dev_kfree_skb_any(struct sk_buff *skb)
+{
+ if (in_irq() || irqs_disabled())
+ dev_kfree_skb_irq(skb);
+ else
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(dev_kfree_skb_any);
+
+
+/* Hot-plugging. */
+void netif_device_detach(struct net_device *dev)
+{
+ if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
+ netif_running(dev)) {
+ netif_stop_queue(dev);
+ }
+}
+EXPORT_SYMBOL(netif_device_detach);
+
+void netif_device_attach(struct net_device *dev)
+{
+ if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
+ netif_running(dev)) {
+ netif_wake_queue(dev);
+ __netdev_watchdog_up(dev);
+ }
+}
+EXPORT_SYMBOL(netif_device_attach);
+
+
/*
* Invalidate hardware checksum when packet is to be mangled, and
* complete checksum manually on outgoing path.
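
The dev.c hunk above adds out-of-line __netif_schedule(), __netif_rx_schedule(), dev_kfree_skb_any() and the netif_device_detach()/netif_device_attach() pair. The detach/attach helpers are what a driver would call around a power transition; a sketch with an invented driver (the exdev_* names and the PCI callbacks are assumptions, not part of this patch):

#include <linux/netdevice.h>
#include <linux/pci.h>

/* Hypothetical driver: mark the device absent and stop its queue before
 * powering the hardware down, and undo both on resume. */
static int exdev_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* clears __LINK_STATE_PRESENT, stops the queue */
	/* ... save state, power the hardware down ... */
	return 0;
}

static int exdev_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... power the hardware up, restore state ... */
	netif_device_attach(dev);	/* wakes the queue and the tx watchdog */
	return 0;
}
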
diff --git a/net/core/sock.c b/net/core/sock.c
index a96ea7dd0fc1b..ed2afdb9ea2df 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -385,7 +385,21 @@ set_sndbuf:
val = sysctl_rmem_max;
set_rcvbuf:
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
- /* FIXME: is this lower bound the right one? */
+ /*
+ * We double it on the way in to account for
+ * "struct sk_buff" etc. overhead. Applications
+ * assume that the SO_RCVBUF setting they make will
+ * allow that much actual data to be received on that
+ * socket.
+ *
+ * Applications are unaware that "struct sk_buff" and
+ * other overheads allocate from the receive buffer
+ * during socket buffer allocation.
+ *
+ * And after considering the possible alternatives,
+ * returning the value we actually used in getsockopt
+ * is the most desirable behavior.
+ */
if ((val * 2) < SOCK_MIN_RCVBUF)
sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
else
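
The comment added above explains why SO_RCVBUF is stored doubled and why getsockopt() reports the doubled value. A quick userspace check (the numbers are typical, not guaranteed):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int req = 32 * 1024, got = 0;
	socklen_t len = sizeof(got);

	if (fd < 0)
		return 1;
	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
	/* On Linux this usually prints 65536: the requested size is doubled
	 * to leave room for struct sk_buff and other per-packet overhead. */
	printf("asked for %d, kernel reports %d\n", req, got);
	close(fd);
	return 0;
}
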
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index e3dd30d36c8a0..b39e2a5978896 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -204,7 +204,7 @@ static int dccp_feat_reconcile(struct sock *sk, struct dccp_opt_pend *opt,
if (rc) {
kfree(opt->dccpop_sc->dccpoc_val);
kfree(opt->dccpop_sc);
- opt->dccpop_sc = 0;
+ opt->dccpop_sc = NULL;
return rc;
}
@@ -322,7 +322,7 @@ static void dccp_feat_empty_confirm(struct dccp_minisock *dmsk,
opt->dccpop_type = type == DCCPO_CHANGE_L ? DCCPO_CONFIRM_R :
DCCPO_CONFIRM_L;
opt->dccpop_feat = feature;
- opt->dccpop_val = 0;
+ opt->dccpop_val = NULL;
opt->dccpop_len = 0;
/* change feature */
@@ -523,7 +523,7 @@ int dccp_feat_clone(struct sock *oldsk, struct sock *newsk)
* once...
*/
/* the master socket no longer needs to worry about confirms */
- opt->dccpop_sc = 0; /* it's not a memleak---new socket has it */
+ opt->dccpop_sc = NULL; /* it's not a memleak---new socket has it */
/* reset state for a new socket */
opt->dccpop_conf = 0;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index d2ae9893ca173..a26ff9f44576f 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -620,7 +620,7 @@ int dn_dev_set_default(struct net_device *dev, int force)
}
write_unlock(&dndev_lock);
if (old)
- dev_put(dev);
+ dev_put(old);
return rv;
}
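
The decnet fix above is a reference-counting one: when a new default device replaces an old one, the reference to drop is the old device's, not the new one's. The usual hold/swap/put shape, with invented names:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

static struct net_device *example_default_dev;	/* hypothetical global */
static DEFINE_SPINLOCK(example_default_lock);

static void example_set_default(struct net_device *dev)
{
	struct net_device *old;

	dev_hold(dev);				/* pin the new device first */
	spin_lock(&example_default_lock);
	old = example_default_dev;
	example_default_dev = dev;
	spin_unlock(&example_default_lock);
	if (old)
		dev_put(old);			/* release the reference on the old device */
}
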
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index e16d8b42b953b..e2e4771fa4c6d 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -116,7 +116,7 @@ error:
return err;
}
-static int ah_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
+static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ah_hlen;
struct iphdr *iph;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index bf88c620a9544..9d1881c07a32a 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -133,7 +133,7 @@ error:
* expensive, so we only support truncated data, which is the recommended
* and common case.
*/
-static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
+static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
struct iphdr *iph;
struct ip_esp_hdr *esph;
@@ -208,9 +208,6 @@ static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc
struct xfrm_encap_tmpl *encap = x->encap;
struct udphdr *uh;
- if (encap->encap_type != decap->decap_type)
- goto out;
-
uh = (struct udphdr *)(iph + 1);
encap_len = (void*)esph - (void*)uh;
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index c95020f7c81e3..0a1d86a0f6328 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -81,8 +81,7 @@ out:
return err;
}
-static int ipcomp_input(struct xfrm_state *x,
- struct xfrm_decap_state *decap, struct sk_buff *skb)
+static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
{
u8 nexthdr;
int err = 0;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 882b842c25d4a..77855ccd6b439 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -221,16 +221,6 @@ config IP_NF_MATCH_IPRANGE
To compile it as a module, choose M here. If unsure, say N.
-config IP_NF_MATCH_MULTIPORT
- tristate "Multiple port match support"
- depends on IP_NF_IPTABLES
- help
- Multiport matching allows you to match TCP or UDP packets based on
- a series of source or destination ports: normally a rule can only
- match a single range of ports.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP_NF_MATCH_TOS
tristate "TOS match support"
depends on IP_NF_IPTABLES
@@ -272,12 +262,12 @@ config IP_NF_MATCH_DSCP
To compile it as a module, choose M here. If unsure, say N.
-config IP_NF_MATCH_AH_ESP
- tristate "AH/ESP match support"
+config IP_NF_MATCH_AH
+ tristate "AH match support"
depends on IP_NF_IPTABLES
help
- These two match extensions (`ah' and `esp') allow you to match a
- range of SPIs inside AH or ESP headers of IPSec packets.
+ This match extension allows you to match a range of SPIs
+ inside AH header of IPSec packets.
To compile it as a module, choose M here. If unsure, say N.
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index f2cd9a6c5b917..461cb1eb5de79 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -53,13 +53,12 @@ obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
# matches
obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o
obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
-obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o
obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
obj-$(CONFIG_IP_NF_MATCH_DSCP) += ipt_dscp.o
-obj-$(CONFIG_IP_NF_MATCH_AH_ESP) += ipt_ah.o ipt_esp.o
+obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 9b6e19bae90fe..01bd7cab93676 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -1658,7 +1658,7 @@ static void __exit ctnetlink_exit(void)
printk("ctnetlink: unregistering from nfnetlink.\n");
#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
- ip_conntrack_unregister_notifier(&ctnl_notifier_exp);
+ ip_conntrack_expect_unregister_notifier(&ctnl_notifier_exp);
ip_conntrack_unregister_notifier(&ctnl_notifier);
#endif
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 460fd905fad03..d5b8cdd361ce8 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
+#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
@@ -799,17 +800,11 @@ get_counters(const struct xt_table_info *t,
}
}
-static int
-copy_entries_to_user(unsigned int total_size,
- struct ipt_table *table,
- void __user *userptr)
+static inline struct xt_counters * alloc_counters(struct ipt_table *table)
{
- unsigned int off, num, countersize;
- struct ipt_entry *e;
+ unsigned int countersize;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
- int ret = 0;
- void *loc_cpu_entry;
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
@@ -818,13 +813,32 @@ copy_entries_to_user(unsigned int total_size,
counters = vmalloc_node(countersize, numa_node_id());
if (counters == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
/* First, sum counters... */
write_lock_bh(&table->lock);
get_counters(private, counters);
write_unlock_bh(&table->lock);
+ return counters;
+}
+
+static int
+copy_entries_to_user(unsigned int total_size,
+ struct ipt_table *table,
+ void __user *userptr)
+{
+ unsigned int off, num;
+ struct ipt_entry *e;
+ struct xt_counters *counters;
+ struct xt_table_info *private = table->private;
+ int ret = 0;
+ void *loc_cpu_entry;
+
+ counters = alloc_counters(table);
+ if (IS_ERR(counters))
+ return PTR_ERR(counters);
+
/* choose the copy that is on our node/cpu, ...
* This choice is lazy (because current thread is
* allowed to migrate to another cpu)
@@ -884,25 +898,278 @@ copy_entries_to_user(unsigned int total_size,
return ret;
}
+#ifdef CONFIG_COMPAT
+struct compat_delta {
+ struct compat_delta *next;
+ u_int16_t offset;
+ short delta;
+};
+
+static struct compat_delta *compat_offsets = NULL;
+
+static int compat_add_offset(u_int16_t offset, short delta)
+{
+ struct compat_delta *tmp;
+
+ tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ tmp->offset = offset;
+ tmp->delta = delta;
+ if (compat_offsets) {
+ tmp->next = compat_offsets->next;
+ compat_offsets->next = tmp;
+ } else {
+ compat_offsets = tmp;
+ tmp->next = NULL;
+ }
+ return 0;
+}
+
+static void compat_flush_offsets(void)
+{
+ struct compat_delta *tmp, *next;
+
+ if (compat_offsets) {
+ for(tmp = compat_offsets; tmp; tmp = next) {
+ next = tmp->next;
+ kfree(tmp);
+ }
+ compat_offsets = NULL;
+ }
+}
+
+static short compat_calc_jump(u_int16_t offset)
+{
+ struct compat_delta *tmp;
+ short delta;
+
+ for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
+ if (tmp->offset < offset)
+ delta += tmp->delta;
+ return delta;
+}
+
+struct compat_ipt_standard_target
+{
+ struct compat_xt_entry_target target;
+ compat_int_t verdict;
+};
+
+#define IPT_ST_OFFSET (sizeof(struct ipt_standard_target) - \
+ sizeof(struct compat_ipt_standard_target))
+
+struct compat_ipt_standard
+{
+ struct compat_ipt_entry entry;
+ struct compat_ipt_standard_target target;
+};
+
+static int compat_ipt_standard_fn(void *target,
+ void **dstptr, int *size, int convert)
+{
+ struct compat_ipt_standard_target compat_st, *pcompat_st;
+ struct ipt_standard_target st, *pst;
+ int ret;
+
+ ret = 0;
+ switch (convert) {
+ case COMPAT_TO_USER:
+ pst = (struct ipt_standard_target *)target;
+ memcpy(&compat_st.target, &pst->target,
+ sizeof(struct ipt_entry_target));
+ compat_st.verdict = pst->verdict;
+ if (compat_st.verdict > 0)
+ compat_st.verdict -=
+ compat_calc_jump(compat_st.verdict);
+ compat_st.target.u.user.target_size =
+ sizeof(struct compat_ipt_standard_target);
+ if (__copy_to_user(*dstptr, &compat_st,
+ sizeof(struct compat_ipt_standard_target)))
+ ret = -EFAULT;
+ *size -= IPT_ST_OFFSET;
+ *dstptr += sizeof(struct compat_ipt_standard_target);
+ break;
+ case COMPAT_FROM_USER:
+ pcompat_st =
+ (struct compat_ipt_standard_target *)target;
+ memcpy(&st.target, &pcompat_st->target,
+ sizeof(struct ipt_entry_target));
+ st.verdict = pcompat_st->verdict;
+ if (st.verdict > 0)
+ st.verdict += compat_calc_jump(st.verdict);
+ st.target.u.user.target_size =
+ sizeof(struct ipt_standard_target);
+ memcpy(*dstptr, &st,
+ sizeof(struct ipt_standard_target));
+ *size += IPT_ST_OFFSET;
+ *dstptr += sizeof(struct ipt_standard_target);
+ break;
+ case COMPAT_CALC_SIZE:
+ *size += IPT_ST_OFFSET;
+ break;
+ default:
+ ret = -ENOPROTOOPT;
+ break;
+ }
+ return ret;
+}
+
+static inline int
+compat_calc_match(struct ipt_entry_match *m, int * size)
+{
+ if (m->u.kernel.match->compat)
+ m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
+ else
+ xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
+ return 0;
+}
+
+static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
+ void *base, struct xt_table_info *newinfo)
+{
+ struct ipt_entry_target *t;
+ u_int16_t entry_offset;
+ int off, i, ret;
+
+ off = 0;
+ entry_offset = (void *)e - base;
+ IPT_MATCH_ITERATE(e, compat_calc_match, &off);
+ t = ipt_get_target(e);
+ if (t->u.kernel.target->compat)
+ t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
+ else
+ xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
+ newinfo->size -= off;
+ ret = compat_add_offset(entry_offset, off);
+ if (ret)
+ return ret;
+
+ for (i = 0; i< NF_IP_NUMHOOKS; i++) {
+ if (info->hook_entry[i] && (e < (struct ipt_entry *)
+ (base + info->hook_entry[i])))
+ newinfo->hook_entry[i] -= off;
+ if (info->underflow[i] && (e < (struct ipt_entry *)
+ (base + info->underflow[i])))
+ newinfo->underflow[i] -= off;
+ }
+ return 0;
+}
+
+static int compat_table_info(struct xt_table_info *info,
+ struct xt_table_info *newinfo)
+{
+ void *loc_cpu_entry;
+ int i;
+
+ if (!newinfo || !info)
+ return -EINVAL;
+
+ memset(newinfo, 0, sizeof(struct xt_table_info));
+ newinfo->size = info->size;
+ newinfo->number = info->number;
+ for (i = 0; i < NF_IP_NUMHOOKS; i++) {
+ newinfo->hook_entry[i] = info->hook_entry[i];
+ newinfo->underflow[i] = info->underflow[i];
+ }
+ loc_cpu_entry = info->entries[raw_smp_processor_id()];
+ return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
+ compat_calc_entry, info, loc_cpu_entry, newinfo);
+}
+#endif
+
+static int get_info(void __user *user, int *len, int compat)
+{
+ char name[IPT_TABLE_MAXNAMELEN];
+ struct ipt_table *t;
+ int ret;
+
+ if (*len != sizeof(struct ipt_getinfo)) {
+ duprintf("length %u != %u\n", *len,
+ (unsigned int)sizeof(struct ipt_getinfo));
+ return -EINVAL;
+ }
+
+ if (copy_from_user(name, user, sizeof(name)) != 0)
+ return -EFAULT;
+
+ name[IPT_TABLE_MAXNAMELEN-1] = '\0';
+#ifdef CONFIG_COMPAT
+ if (compat)
+ xt_compat_lock(AF_INET);
+#endif
+ t = try_then_request_module(xt_find_table_lock(AF_INET, name),
+ "iptable_%s", name);
+ if (t && !IS_ERR(t)) {
+ struct ipt_getinfo info;
+ struct xt_table_info *private = t->private;
+
+#ifdef CONFIG_COMPAT
+ if (compat) {
+ struct xt_table_info tmp;
+ ret = compat_table_info(private, &tmp);
+ compat_flush_offsets();
+ private = &tmp;
+ }
+#endif
+ info.valid_hooks = t->valid_hooks;
+ memcpy(info.hook_entry, private->hook_entry,
+ sizeof(info.hook_entry));
+ memcpy(info.underflow, private->underflow,
+ sizeof(info.underflow));
+ info.num_entries = private->number;
+ info.size = private->size;
+ strcpy(info.name, name);
+
+ if (copy_to_user(user, &info, *len) != 0)
+ ret = -EFAULT;
+ else
+ ret = 0;
+
+ xt_table_unlock(t);
+ module_put(t->me);
+ } else
+ ret = t ? PTR_ERR(t) : -ENOENT;
+#ifdef CONFIG_COMPAT
+ if (compat)
+ xt_compat_unlock(AF_INET);
+#endif
+ return ret;
+}
+
static int
-get_entries(const struct ipt_get_entries *entries,
- struct ipt_get_entries __user *uptr)
+get_entries(struct ipt_get_entries __user *uptr, int *len)
{
int ret;
+ struct ipt_get_entries get;
struct ipt_table *t;
- t = xt_find_table_lock(AF_INET, entries->name);
+ if (*len < sizeof(get)) {
+ duprintf("get_entries: %u < %d\n", *len,
+ (unsigned int)sizeof(get));
+ return -EINVAL;
+ }
+ if (copy_from_user(&get, uptr, sizeof(get)) != 0)
+ return -EFAULT;
+ if (*len != sizeof(struct ipt_get_entries) + get.size) {
+ duprintf("get_entries: %u != %u\n", *len,
+ (unsigned int)(sizeof(struct ipt_get_entries) +
+ get.size));
+ return -EINVAL;
+ }
+
+ t = xt_find_table_lock(AF_INET, get.name);
if (t && !IS_ERR(t)) {
struct xt_table_info *private = t->private;
duprintf("t->private->number = %u\n",
private->number);
- if (entries->size == private->size)
+ if (get.size == private->size)
ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
else {
duprintf("get_entries: I've got %u not %u!\n",
private->size,
- entries->size);
+ get.size);
ret = -EINVAL;
}
module_put(t->me);
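
The compat machinery introduced in the hunk above exists because the 64-bit ipt_entry/target layout is larger than the 32-bit one, while standard-target verdicts store jumps as byte offsets into the rule blob; compat_add_offset() records the per-rule size delta and compat_calc_jump() sums the deltas in front of a target so the offset can be rebased. A self-contained sketch of that bookkeeping, simplified to an array where the kernel uses a linked list (values are invented):

#include <stdio.h>

/* One record per translated rule: where it starts in the 32-bit blob and
 * how many bytes the kernel-side layout grows by. */
struct delta {
	unsigned int offset;
	int adjust;
};

/* Sum the adjustments of every rule in front of the jump target, as
 * compat_calc_jump() does while walking its list. */
static int rebase_jump(const struct delta *d, int n, unsigned int target)
{
	int shift = 0;
	int i;

	for (i = 0; i < n; i++)
		if (d[i].offset < target)
			shift += d[i].adjust;
	return shift;
}

int main(void)
{
	/* Hypothetical table: three rules, each 8 bytes bigger in-kernel. */
	struct delta d[] = { { 0, 8 }, { 112, 8 }, { 224, 8 } };
	unsigned int user_jump = 224;	/* offset as produced by 32-bit iptables */

	printf("kernel-side jump: %u\n", user_jump + rebase_jump(d, 3, user_jump));
	return 0;
}
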
@@ -914,79 +1181,47 @@ get_entries(const struct ipt_get_entries *entries,
}
static int
-do_replace(void __user *user, unsigned int len)
+__do_replace(const char *name, unsigned int valid_hooks,
+ struct xt_table_info *newinfo, unsigned int num_counters,
+ void __user *counters_ptr)
{
int ret;
- struct ipt_replace tmp;
struct ipt_table *t;
- struct xt_table_info *newinfo, *oldinfo;
+ struct xt_table_info *oldinfo;
struct xt_counters *counters;
- void *loc_cpu_entry, *loc_cpu_old_entry;
+ void *loc_cpu_old_entry;
- if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
- return -EFAULT;
-
- /* Hack: Causes ipchains to give correct error msg --RR */
- if (len != sizeof(tmp) + tmp.size)
- return -ENOPROTOOPT;
-
- /* overflow check */
- if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
- SMP_CACHE_BYTES)
- return -ENOMEM;
- if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
- return -ENOMEM;
-
- newinfo = xt_alloc_table_info(tmp.size);
- if (!newinfo)
- return -ENOMEM;
-
- /* choose the copy that is our node/cpu */
- loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
- if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
- tmp.size) != 0) {
- ret = -EFAULT;
- goto free_newinfo;
- }
-
- counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
+ ret = 0;
+ counters = vmalloc(num_counters * sizeof(struct xt_counters));
if (!counters) {
ret = -ENOMEM;
- goto free_newinfo;
+ goto out;
}
- ret = translate_table(tmp.name, tmp.valid_hooks,
- newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
- tmp.hook_entry, tmp.underflow);
- if (ret != 0)
- goto free_newinfo_counters;
-
- duprintf("ip_tables: Translated table\n");
-
- t = try_then_request_module(xt_find_table_lock(AF_INET, tmp.name),
- "iptable_%s", tmp.name);
+ t = try_then_request_module(xt_find_table_lock(AF_INET, name),
+ "iptable_%s", name);
if (!t || IS_ERR(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free_newinfo_counters_untrans;
}
/* You lied! */
- if (tmp.valid_hooks != t->valid_hooks) {
+ if (valid_hooks != t->valid_hooks) {
duprintf("Valid hook crap: %08X vs %08X\n",
- tmp.valid_hooks, t->valid_hooks);
+ valid_hooks, t->valid_hooks);
ret = -EINVAL;
goto put_module;
}
- oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
+ oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
if (!oldinfo)
goto put_module;
/* Update module usage count based on number of rules */
duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
oldinfo->number, oldinfo->initial_entries, newinfo->number);
- if ((oldinfo->number > oldinfo->initial_entries) ||
- (newinfo->number <= oldinfo->initial_entries))
+ if ((oldinfo->number > oldinfo->initial_entries) ||
+ (newinfo->number <= oldinfo->initial_entries))
module_put(t->me);
if ((oldinfo->number > oldinfo->initial_entries) &&
(newinfo->number <= oldinfo->initial_entries))
@@ -998,8 +1233,8 @@ do_replace(void __user *user, unsigned int len)
loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
xt_free_table_info(oldinfo);
- if (copy_to_user(tmp.counters, counters,
- sizeof(struct xt_counters) * tmp.num_counters) != 0)
+ if (copy_to_user(counters_ptr, counters,
+ sizeof(struct xt_counters) * num_counters) != 0)
ret = -EFAULT;
vfree(counters);
xt_table_unlock(t);
@@ -1009,9 +1244,62 @@ do_replace(void __user *user, unsigned int len)
module_put(t->me);
xt_table_unlock(t);
free_newinfo_counters_untrans:
- IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
- free_newinfo_counters:
vfree(counters);
+ out:
+ return ret;
+}
+
+static int
+do_replace(void __user *user, unsigned int len)
+{
+ int ret;
+ struct ipt_replace tmp;
+ struct xt_table_info *newinfo;
+ void *loc_cpu_entry;
+
+ if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+ /* Hack: Causes ipchains to give correct error msg --RR */
+ if (len != sizeof(tmp) + tmp.size)
+ return -ENOPROTOOPT;
+
+ /* overflow check */
+ if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+ SMP_CACHE_BYTES)
+ return -ENOMEM;
+ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+ return -ENOMEM;
+
+ newinfo = xt_alloc_table_info(tmp.size);
+ if (!newinfo)
+ return -ENOMEM;
+
+ /* choose the copy that is our node/cpu */
+ loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+ if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
+ tmp.size) != 0) {
+ ret = -EFAULT;
+ goto free_newinfo;
+ }
+
+ ret = translate_table(tmp.name, tmp.valid_hooks,
+ newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
+ tmp.hook_entry, tmp.underflow);
+ if (ret != 0)
+ goto free_newinfo;
+
+ duprintf("ip_tables: Translated table\n");
+
+ ret = __do_replace(tmp.name, tmp.valid_hooks,
+ newinfo, tmp.num_counters,
+ tmp.counters);
+ if (ret)
+ goto free_newinfo_untrans;
+ return 0;
+
+ free_newinfo_untrans:
+ IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
@@ -1040,31 +1328,59 @@ add_counter_to_entry(struct ipt_entry *e,
}
static int
-do_add_counters(void __user *user, unsigned int len)
+do_add_counters(void __user *user, unsigned int len, int compat)
{
unsigned int i;
- struct xt_counters_info tmp, *paddc;
+ struct xt_counters_info tmp;
+ struct xt_counters *paddc;
+ unsigned int num_counters;
+ char *name;
+ int size;
+ void *ptmp;
struct ipt_table *t;
struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
+#ifdef CONFIG_COMPAT
+ struct compat_xt_counters_info compat_tmp;
- if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+ if (compat) {
+ ptmp = &compat_tmp;
+ size = sizeof(struct compat_xt_counters_info);
+ } else
+#endif
+ {
+ ptmp = &tmp;
+ size = sizeof(struct xt_counters_info);
+ }
+
+ if (copy_from_user(ptmp, user, size) != 0)
return -EFAULT;
- if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
+#ifdef CONFIG_COMPAT
+ if (compat) {
+ num_counters = compat_tmp.num_counters;
+ name = compat_tmp.name;
+ } else
+#endif
+ {
+ num_counters = tmp.num_counters;
+ name = tmp.name;
+ }
+
+ if (len != size + num_counters * sizeof(struct xt_counters))
return -EINVAL;
- paddc = vmalloc_node(len, numa_node_id());
+ paddc = vmalloc_node(len - size, numa_node_id());
if (!paddc)
return -ENOMEM;
- if (copy_from_user(paddc, user, len) != 0) {
+ if (copy_from_user(paddc, user + size, len - size) != 0) {
ret = -EFAULT;
goto free;
}
- t = xt_find_table_lock(AF_INET, tmp.name);
+ t = xt_find_table_lock(AF_INET, name);
if (!t || IS_ERR(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free;
@@ -1072,7 +1388,7 @@ do_add_counters(void __user *user, unsigned int len)
write_lock_bh(&t->lock);
private = t->private;
- if (private->number != paddc->num_counters) {
+ if (private->number != num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
@@ -1083,7 +1399,7 @@ do_add_counters(void __user *user, unsigned int len)
IPT_ENTRY_ITERATE(loc_cpu_entry,
private->size,
add_counter_to_entry,
- paddc->counters,
+ paddc,
&i);
unlock_up_free:
write_unlock_bh(&t->lock);
@@ -1095,8 +1411,438 @@ do_add_counters(void __user *user, unsigned int len)
return ret;
}
+#ifdef CONFIG_COMPAT
+struct compat_ipt_replace {
+ char name[IPT_TABLE_MAXNAMELEN];
+ u32 valid_hooks;
+ u32 num_entries;
+ u32 size;
+ u32 hook_entry[NF_IP_NUMHOOKS];
+ u32 underflow[NF_IP_NUMHOOKS];
+ u32 num_counters;
+ compat_uptr_t counters; /* struct ipt_counters * */
+ struct compat_ipt_entry entries[0];
+};
+
+static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
+ void __user **dstptr, compat_uint_t *size)
+{
+ if (m->u.kernel.match->compat)
+ return m->u.kernel.match->compat(m, dstptr, size,
+ COMPAT_TO_USER);
+ else
+ return xt_compat_match(m, dstptr, size, COMPAT_TO_USER);
+}
+
+static int compat_copy_entry_to_user(struct ipt_entry *e,
+ void __user **dstptr, compat_uint_t *size)
+{
+ struct ipt_entry_target __user *t;
+ struct compat_ipt_entry __user *ce;
+ u_int16_t target_offset, next_offset;
+ compat_uint_t origsize;
+ int ret;
+
+ ret = -EFAULT;
+ origsize = *size;
+ ce = (struct compat_ipt_entry __user *)*dstptr;
+ if (__copy_to_user(ce, e, sizeof(struct ipt_entry)))
+ goto out;
+
+ *dstptr += sizeof(struct compat_ipt_entry);
+ ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
+ target_offset = e->target_offset - (origsize - *size);
+ if (ret)
+ goto out;
+ t = ipt_get_target(e);
+ if (t->u.kernel.target->compat)
+ ret = t->u.kernel.target->compat(t, dstptr, size,
+ COMPAT_TO_USER);
+ else
+ ret = xt_compat_target(t, dstptr, size, COMPAT_TO_USER);
+ if (ret)
+ goto out;
+ ret = -EFAULT;
+ next_offset = e->next_offset - (origsize - *size);
+ if (__put_user(target_offset, &ce->target_offset))
+ goto out;
+ if (__put_user(next_offset, &ce->next_offset))
+ goto out;
+ return 0;
+out:
+ return ret;
+}
+
+static inline int
+compat_check_calc_match(struct ipt_entry_match *m,
+ const char *name,
+ const struct ipt_ip *ip,
+ unsigned int hookmask,
+ int *size, int *i)
+{
+ struct ipt_match *match;
+
+ match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
+ m->u.user.revision),
+ "ipt_%s", m->u.user.name);
+ if (IS_ERR(match) || !match) {
+ duprintf("compat_check_calc_match: `%s' not found\n",
+ m->u.user.name);
+ return match ? PTR_ERR(match) : -ENOENT;
+ }
+ m->u.kernel.match = match;
+
+ if (m->u.kernel.match->compat)
+ m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
+ else
+ xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
+
+ (*i)++;
+ return 0;
+}
+
+static inline int
+check_compat_entry_size_and_hooks(struct ipt_entry *e,
+ struct xt_table_info *newinfo,
+ unsigned int *size,
+ unsigned char *base,
+ unsigned char *limit,
+ unsigned int *hook_entries,
+ unsigned int *underflows,
+ unsigned int *i,
+ const char *name)
+{
+ struct ipt_entry_target *t;
+ struct ipt_target *target;
+ u_int16_t entry_offset;
+ int ret, off, h, j;
+
+ duprintf("check_compat_entry_size_and_hooks %p\n", e);
+ if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
+ || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
+ duprintf("Bad offset %p, limit = %p\n", e, limit);
+ return -EINVAL;
+ }
+
+ if (e->next_offset < sizeof(struct compat_ipt_entry) +
+ sizeof(struct compat_xt_entry_target)) {
+ duprintf("checking: element %p size %u\n",
+ e, e->next_offset);
+ return -EINVAL;
+ }
+
+ if (!ip_checkentry(&e->ip)) {
+ duprintf("ip_tables: ip check failed %p %s.\n", e, name);
+ return -EINVAL;
+ }
+
+ off = 0;
+ entry_offset = (void *)e - (void *)base;
+ j = 0;
+ ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
+ e->comefrom, &off, &j);
+ if (ret != 0)
+ goto out;
+
+ t = ipt_get_target(e);
+ target = try_then_request_module(xt_find_target(AF_INET,
+ t->u.user.name,
+ t->u.user.revision),
+ "ipt_%s", t->u.user.name);
+ if (IS_ERR(target) || !target) {
+ duprintf("check_entry: `%s' not found\n", t->u.user.name);
+ ret = target ? PTR_ERR(target) : -ENOENT;
+ goto out;
+ }
+ t->u.kernel.target = target;
+
+ if (t->u.kernel.target->compat)
+ t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
+ else
+ xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
+ *size += off;
+ ret = compat_add_offset(entry_offset, off);
+ if (ret)
+ goto out;
+
+ /* Check hooks & underflows */
+ for (h = 0; h < NF_IP_NUMHOOKS; h++) {
+ if ((unsigned char *)e - base == hook_entries[h])
+ newinfo->hook_entry[h] = hook_entries[h];
+ if ((unsigned char *)e - base == underflows[h])
+ newinfo->underflow[h] = underflows[h];
+ }
+
+ /* Clear counters and comefrom */
+ e->counters = ((struct ipt_counters) { 0, 0 });
+ e->comefrom = 0;
+
+ (*i)++;
+ return 0;
+out:
+ IPT_MATCH_ITERATE(e, cleanup_match, &j);
+ return ret;
+}
+
+static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
+ void **dstptr, compat_uint_t *size, const char *name,
+ const struct ipt_ip *ip, unsigned int hookmask)
+{
+ struct ipt_entry_match *dm;
+ struct ipt_match *match;
+ int ret;
+
+ dm = (struct ipt_entry_match *)*dstptr;
+ match = m->u.kernel.match;
+ if (match->compat)
+ match->compat(m, dstptr, size, COMPAT_FROM_USER);
+ else
+ xt_compat_match(m, dstptr, size, COMPAT_FROM_USER);
+
+ ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
+ name, hookmask, ip->proto,
+ ip->invflags & IPT_INV_PROTO);
+ if (ret)
+ return ret;
+
+ if (m->u.kernel.match->checkentry
+ && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
+ dm->u.match_size - sizeof(*dm),
+ hookmask)) {
+ duprintf("ip_tables: check failed for `%s'.\n",
+ m->u.kernel.match->name);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
+ unsigned int *size, const char *name,
+ struct xt_table_info *newinfo, unsigned char *base)
+{
+ struct ipt_entry_target *t;
+ struct ipt_target *target;
+ struct ipt_entry *de;
+ unsigned int origsize;
+ int ret, h;
+
+ ret = 0;
+ origsize = *size;
+ de = (struct ipt_entry *)*dstptr;
+ memcpy(de, e, sizeof(struct ipt_entry));
+
+ *dstptr += sizeof(struct compat_ipt_entry);
+ ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
+ name, &de->ip, de->comefrom);
+ if (ret)
+ goto out;
+ de->target_offset = e->target_offset - (origsize - *size);
+ t = ipt_get_target(e);
+ target = t->u.kernel.target;
+ if (target->compat)
+ target->compat(t, dstptr, size, COMPAT_FROM_USER);
+ else
+ xt_compat_target(t, dstptr, size, COMPAT_FROM_USER);
+
+ de->next_offset = e->next_offset - (origsize - *size);
+ for (h = 0; h < NF_IP_NUMHOOKS; h++) {
+ if ((unsigned char *)de - base < newinfo->hook_entry[h])
+ newinfo->hook_entry[h] -= origsize - *size;
+ if ((unsigned char *)de - base < newinfo->underflow[h])
+ newinfo->underflow[h] -= origsize - *size;
+ }
+
+ t = ipt_get_target(de);
+ target = t->u.kernel.target;
+ ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
+ name, e->comefrom, e->ip.proto,
+ e->ip.invflags & IPT_INV_PROTO);
+ if (ret)
+ goto out;
+
+ ret = -EINVAL;
+ if (t->u.kernel.target == &ipt_standard_target) {
+ if (!standard_check(t, *size))
+ goto out;
+ } else if (t->u.kernel.target->checkentry
+ && !t->u.kernel.target->checkentry(name, de, target,
+ t->data, t->u.target_size - sizeof(*t),
+ de->comefrom)) {
+ duprintf("ip_tables: compat: check failed for `%s'.\n",
+ t->u.kernel.target->name);
+ goto out;
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
static int
-do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+translate_compat_table(const char *name,
+ unsigned int valid_hooks,
+ struct xt_table_info **pinfo,
+ void **pentry0,
+ unsigned int total_size,
+ unsigned int number,
+ unsigned int *hook_entries,
+ unsigned int *underflows)
+{
+ unsigned int i;
+ struct xt_table_info *newinfo, *info;
+ void *pos, *entry0, *entry1;
+ unsigned int size;
+ int ret;
+
+ info = *pinfo;
+ entry0 = *pentry0;
+ size = total_size;
+ info->number = number;
+
+ /* Init all hooks to impossible value. */
+ for (i = 0; i < NF_IP_NUMHOOKS; i++) {
+ info->hook_entry[i] = 0xFFFFFFFF;
+ info->underflow[i] = 0xFFFFFFFF;
+ }
+
+ duprintf("translate_compat_table: size %u\n", info->size);
+ i = 0;
+ xt_compat_lock(AF_INET);
+ /* Walk through entries, checking offsets. */
+ ret = IPT_ENTRY_ITERATE(entry0, total_size,
+ check_compat_entry_size_and_hooks,
+ info, &size, entry0,
+ entry0 + total_size,
+ hook_entries, underflows, &i, name);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = -EINVAL;
+ if (i != number) {
+ duprintf("translate_compat_table: %u not %u entries\n",
+ i, number);
+ goto out_unlock;
+ }
+
+ /* Check hooks all assigned */
+ for (i = 0; i < NF_IP_NUMHOOKS; i++) {
+ /* Only hooks which are valid */
+ if (!(valid_hooks & (1 << i)))
+ continue;
+ if (info->hook_entry[i] == 0xFFFFFFFF) {
+ duprintf("Invalid hook entry %u %u\n",
+ i, hook_entries[i]);
+ goto out_unlock;
+ }
+ if (info->underflow[i] == 0xFFFFFFFF) {
+ duprintf("Invalid underflow %u %u\n",
+ i, underflows[i]);
+ goto out_unlock;
+ }
+ }
+
+ ret = -ENOMEM;
+ newinfo = xt_alloc_table_info(size);
+ if (!newinfo)
+ goto out_unlock;
+
+ newinfo->number = number;
+ for (i = 0; i < NF_IP_NUMHOOKS; i++) {
+ newinfo->hook_entry[i] = info->hook_entry[i];
+ newinfo->underflow[i] = info->underflow[i];
+ }
+ entry1 = newinfo->entries[raw_smp_processor_id()];
+ pos = entry1;
+ size = total_size;
+ ret = IPT_ENTRY_ITERATE(entry0, total_size,
+ compat_copy_entry_from_user, &pos, &size,
+ name, newinfo, entry1);
+ compat_flush_offsets();
+ xt_compat_unlock(AF_INET);
+ if (ret)
+ goto free_newinfo;
+
+ ret = -ELOOP;
+ if (!mark_source_chains(newinfo, valid_hooks, entry1))
+ goto free_newinfo;
+
+ /* And one copy for every other CPU */
+ for_each_cpu(i)
+ if (newinfo->entries[i] && newinfo->entries[i] != entry1)
+ memcpy(newinfo->entries[i], entry1, newinfo->size);
+
+ *pinfo = newinfo;
+ *pentry0 = entry1;
+ xt_free_table_info(info);
+ return 0;
+
+free_newinfo:
+ xt_free_table_info(newinfo);
+out:
+ return ret;
+out_unlock:
+ xt_compat_unlock(AF_INET);
+ goto out;
+}
+
+static int
+compat_do_replace(void __user *user, unsigned int len)
+{
+ int ret;
+ struct compat_ipt_replace tmp;
+ struct xt_table_info *newinfo;
+ void *loc_cpu_entry;
+
+ if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+ /* Hack: Causes ipchains to give correct error msg --RR */
+ if (len != sizeof(tmp) + tmp.size)
+ return -ENOPROTOOPT;
+
+ /* overflow check */
+ if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+ SMP_CACHE_BYTES)
+ return -ENOMEM;
+ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+ return -ENOMEM;
+
+ newinfo = xt_alloc_table_info(tmp.size);
+ if (!newinfo)
+ return -ENOMEM;
+
+ /* choose the copy that is our node/cpu */
+ loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+ if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
+ tmp.size) != 0) {
+ ret = -EFAULT;
+ goto free_newinfo;
+ }
+
+ ret = translate_compat_table(tmp.name, tmp.valid_hooks,
+ &newinfo, &loc_cpu_entry, tmp.size,
+ tmp.num_entries, tmp.hook_entry, tmp.underflow);
+ if (ret != 0)
+ goto free_newinfo;
+
+ duprintf("compat_do_replace: Translated table\n");
+
+ ret = __do_replace(tmp.name, tmp.valid_hooks,
+ newinfo, tmp.num_counters,
+ compat_ptr(tmp.counters));
+ if (ret)
+ goto free_newinfo_untrans;
+ return 0;
+
+ free_newinfo_untrans:
+ IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
+ free_newinfo:
+ xt_free_table_info(newinfo);
+ return ret;
+}
+
+static int
+compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
+ unsigned int len)
{
int ret;
@@ -1105,11 +1851,11 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
switch (cmd) {
case IPT_SO_SET_REPLACE:
- ret = do_replace(user, len);
+ ret = compat_do_replace(user, len);
break;
case IPT_SO_SET_ADD_COUNTERS:
- ret = do_add_counters(user, len);
+ ret = do_add_counters(user, len, 1);
break;
default:
@@ -1120,75 +1866,196 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
return ret;
}
+struct compat_ipt_get_entries
+{
+ char name[IPT_TABLE_MAXNAMELEN];
+ compat_uint_t size;
+ struct compat_ipt_entry entrytable[0];
+};
+
+static int compat_copy_entries_to_user(unsigned int total_size,
+ struct ipt_table *table, void __user *userptr)
+{
+ unsigned int off, num;
+ struct compat_ipt_entry e;
+ struct xt_counters *counters;
+ struct xt_table_info *private = table->private;
+ void __user *pos;
+ unsigned int size;
+ int ret = 0;
+ void *loc_cpu_entry;
+
+ counters = alloc_counters(table);
+ if (IS_ERR(counters))
+ return PTR_ERR(counters);
+
+ /* choose the copy that is on our node/cpu, ...
+ * This choice is lazy (because current thread is
+ * allowed to migrate to another cpu)
+ */
+ loc_cpu_entry = private->entries[raw_smp_processor_id()];
+ pos = userptr;
+ size = total_size;
+ ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
+ compat_copy_entry_to_user, &pos, &size);
+ if (ret)
+ goto free_counters;
+
+ /* ... then go back and fix counters and names */
+ for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
+ unsigned int i;
+ struct ipt_entry_match m;
+ struct ipt_entry_target t;
+
+ ret = -EFAULT;
+ if (copy_from_user(&e, userptr + off,
+ sizeof(struct compat_ipt_entry)))
+ goto free_counters;
+ if (copy_to_user(userptr + off +
+ offsetof(struct compat_ipt_entry, counters),
+ &counters[num], sizeof(counters[num])))
+ goto free_counters;
+
+ for (i = sizeof(struct compat_ipt_entry);
+ i < e.target_offset; i += m.u.match_size) {
+ if (copy_from_user(&m, userptr + off + i,
+ sizeof(struct ipt_entry_match)))
+ goto free_counters;
+ if (copy_to_user(userptr + off + i +
+ offsetof(struct ipt_entry_match, u.user.name),
+ m.u.kernel.match->name,
+ strlen(m.u.kernel.match->name) + 1))
+ goto free_counters;
+ }
+
+ if (copy_from_user(&t, userptr + off + e.target_offset,
+ sizeof(struct ipt_entry_target)))
+ goto free_counters;
+ if (copy_to_user(userptr + off + e.target_offset +
+ offsetof(struct ipt_entry_target, u.user.name),
+ t.u.kernel.target->name,
+ strlen(t.u.kernel.target->name) + 1))
+ goto free_counters;
+ }
+ ret = 0;
+free_counters:
+ vfree(counters);
+ return ret;
+}
+
static int
-do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
int ret;
+ struct compat_ipt_get_entries get;
+ struct ipt_table *t;
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- switch (cmd) {
- case IPT_SO_GET_INFO: {
- char name[IPT_TABLE_MAXNAMELEN];
- struct ipt_table *t;
+ if (*len < sizeof(get)) {
+ duprintf("compat_get_entries: %u < %u\n",
+ *len, (unsigned int)sizeof(get));
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&get, uptr, sizeof(get)) != 0)
+ return -EFAULT;
+
+ if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
+ duprintf("compat_get_entries: %u != %u\n", *len,
+ (unsigned int)(sizeof(struct compat_ipt_get_entries) +
+ get.size));
+ return -EINVAL;
+ }
- if (*len != sizeof(struct ipt_getinfo)) {
- duprintf("length %u != %u\n", *len,
- sizeof(struct ipt_getinfo));
+ xt_compat_lock(AF_INET);
+ t = xt_find_table_lock(AF_INET, get.name);
+ if (t && !IS_ERR(t)) {
+ struct xt_table_info *private = t->private;
+ struct xt_table_info info;
+ duprintf("t->private->number = %u\n",
+ private->number);
+ ret = compat_table_info(private, &info);
+ if (!ret && get.size == info.size) {
+ ret = compat_copy_entries_to_user(private->size,
+ t, uptr->entrytable);
+ } else if (!ret) {
+ duprintf("compat_get_entries: I've got %u not %u!\n",
+ private->size,
+ get.size);
ret = -EINVAL;
- break;
}
+ compat_flush_offsets();
+ module_put(t->me);
+ xt_table_unlock(t);
+ } else
+ ret = t ? PTR_ERR(t) : -ENOENT;
- if (copy_from_user(name, user, sizeof(name)) != 0) {
- ret = -EFAULT;
- break;
- }
- name[IPT_TABLE_MAXNAMELEN-1] = '\0';
-
- t = try_then_request_module(xt_find_table_lock(AF_INET, name),
- "iptable_%s", name);
- if (t && !IS_ERR(t)) {
- struct ipt_getinfo info;
- struct xt_table_info *private = t->private;
-
- info.valid_hooks = t->valid_hooks;
- memcpy(info.hook_entry, private->hook_entry,
- sizeof(info.hook_entry));
- memcpy(info.underflow, private->underflow,
- sizeof(info.underflow));
- info.num_entries = private->number;
- info.size = private->size;
- memcpy(info.name, name, sizeof(info.name));
-
- if (copy_to_user(user, &info, *len) != 0)
- ret = -EFAULT;
- else
- ret = 0;
- xt_table_unlock(t);
- module_put(t->me);
- } else
- ret = t ? PTR_ERR(t) : -ENOENT;
+ xt_compat_unlock(AF_INET);
+ return ret;
+}
+
+static int
+compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+{
+ int ret;
+
+ switch (cmd) {
+ case IPT_SO_GET_INFO:
+ ret = get_info(user, len, 1);
+ break;
+ case IPT_SO_GET_ENTRIES:
+ ret = compat_get_entries(user, len);
+ break;
+ default:
+ duprintf("compat_do_ipt_get_ctl: unknown request %i\n", cmd);
+ ret = -EINVAL;
}
- break;
+ return ret;
+}
+#endif
- case IPT_SO_GET_ENTRIES: {
- struct ipt_get_entries get;
+static int
+do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+{
+ int ret;
- if (*len < sizeof(get)) {
- duprintf("get_entries: %u < %u\n", *len, sizeof(get));
- ret = -EINVAL;
- } else if (copy_from_user(&get, user, sizeof(get)) != 0) {
- ret = -EFAULT;
- } else if (*len != sizeof(struct ipt_get_entries) + get.size) {
- duprintf("get_entries: %u != %u\n", *len,
- sizeof(struct ipt_get_entries) + get.size);
- ret = -EINVAL;
- } else
- ret = get_entries(&get, user);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case IPT_SO_SET_REPLACE:
+ ret = do_replace(user, len);
break;
+
+ case IPT_SO_SET_ADD_COUNTERS:
+ ret = do_add_counters(user, len, 0);
+ break;
+
+ default:
+ duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
+ ret = -EINVAL;
}
+ return ret;
+}
+
+static int
+do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+{
+ int ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case IPT_SO_GET_INFO:
+ ret = get_info(user, len, 0);
+ break;
+
+ case IPT_SO_GET_ENTRIES:
+ ret = get_entries(user, len);
+ break;
+
case IPT_SO_GET_REVISION_MATCH:
case IPT_SO_GET_REVISION_TARGET: {
struct ipt_get_revision rev;
@@ -1336,6 +2203,9 @@ static struct ipt_target ipt_standard_target = {
.name = IPT_STANDARD_TARGET,
.targetsize = sizeof(int),
.family = AF_INET,
+#ifdef CONFIG_COMPAT
+ .compat = &compat_ipt_standard_fn,
+#endif
};
static struct ipt_target ipt_error_target = {
@@ -1350,9 +2220,15 @@ static struct nf_sockopt_ops ipt_sockopts = {
.set_optmin = IPT_BASE_CTL,
.set_optmax = IPT_SO_SET_MAX+1,
.set = do_ipt_set_ctl,
+#ifdef CONFIG_COMPAT
+ .compat_set = compat_do_ipt_set_ctl,
+#endif
.get_optmin = IPT_BASE_CTL,
.get_optmax = IPT_SO_GET_MAX+1,
.get = do_ipt_get_ctl,
+#ifdef CONFIG_COMPAT
+ .compat_get = compat_do_ipt_get_ctl,
+#endif
};
static struct ipt_match icmp_matchstruct = {
diff --git a/net/ipv4/netfilter/ipt_multiport.c b/net/ipv4/netfilter/ipt_multiport.c
deleted file mode 100644
index ac95d8390bcc0..0000000000000
--- a/net/ipv4/netfilter/ipt_multiport.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/* Kernel module to match one of a list of TCP/UDP ports: ports are in
- the same place so we can treat them as equal. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/udp.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter_ipv4/ipt_multiport.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
-MODULE_DESCRIPTION("iptables multiple port match module");
-
-#if 0
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
-/* Returns 1 if the port is matched by the test, 0 otherwise. */
-static inline int
-ports_match(const u_int16_t *portlist, enum ipt_multiport_flags flags,
- u_int8_t count, u_int16_t src, u_int16_t dst)
-{
- unsigned int i;
- for (i=0; i<count; i++) {
- if (flags != IPT_MULTIPORT_DESTINATION
- && portlist[i] == src)
- return 1;
-
- if (flags != IPT_MULTIPORT_SOURCE
- && portlist[i] == dst)
- return 1;
- }
-
- return 0;
-}
-
-/* Returns 1 if the port is matched by the test, 0 otherwise. */
-static inline int
-ports_match_v1(const struct ipt_multiport_v1 *minfo,
- u_int16_t src, u_int16_t dst)
-{
- unsigned int i;
- u_int16_t s, e;
-
- for (i=0; i < minfo->count; i++) {
- s = minfo->ports[i];
-
- if (minfo->pflags[i]) {
- /* range port matching */
- e = minfo->ports[++i];
- duprintf("src or dst matches with %d-%d?\n", s, e);
-
- if (minfo->flags == IPT_MULTIPORT_SOURCE
- && src >= s && src <= e)
- return 1 ^ minfo->invert;
- if (minfo->flags == IPT_MULTIPORT_DESTINATION
- && dst >= s && dst <= e)
- return 1 ^ minfo->invert;
- if (minfo->flags == IPT_MULTIPORT_EITHER
- && ((dst >= s && dst <= e)
- || (src >= s && src <= e)))
- return 1 ^ minfo->invert;
- } else {
- /* exact port matching */
- duprintf("src or dst matches with %d?\n", s);
-
- if (minfo->flags == IPT_MULTIPORT_SOURCE
- && src == s)
- return 1 ^ minfo->invert;
- if (minfo->flags == IPT_MULTIPORT_DESTINATION
- && dst == s)
- return 1 ^ minfo->invert;
- if (minfo->flags == IPT_MULTIPORT_EITHER
- && (src == s || dst == s))
- return 1 ^ minfo->invert;
- }
- }
-
- return minfo->invert;
-}
-
-static int
-match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const struct xt_match *match,
- const void *matchinfo,
- int offset,
- unsigned int protoff,
- int *hotdrop)
-{
- u16 _ports[2], *pptr;
- const struct ipt_multiport *multiinfo = matchinfo;
-
- if (offset)
- return 0;
-
- pptr = skb_header_pointer(skb, protoff,
- sizeof(_ports), _ports);
- if (pptr == NULL) {
- /* We've been asked to examine this packet, and we
- * can't. Hence, no choice but to drop.
- */
- duprintf("ipt_multiport:"
- " Dropping evil offset=0 tinygram.\n");
- *hotdrop = 1;
- return 0;
- }
-
- return ports_match(multiinfo->ports,
- multiinfo->flags, multiinfo->count,
- ntohs(pptr[0]), ntohs(pptr[1]));
-}
-
-static int
-match_v1(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const struct xt_match *match,
- const void *matchinfo,
- int offset,
- unsigned int protoff,
- int *hotdrop)
-{
- u16 _ports[2], *pptr;
- const struct ipt_multiport_v1 *multiinfo = matchinfo;
-
- if (offset)
- return 0;
-
- pptr = skb_header_pointer(skb, protoff,
- sizeof(_ports), _ports);
- if (pptr == NULL) {
- /* We've been asked to examine this packet, and we
- * can't. Hence, no choice but to drop.
- */
- duprintf("ipt_multiport:"
- " Dropping evil offset=0 tinygram.\n");
- *hotdrop = 1;
- return 0;
- }
-
- return ports_match_v1(multiinfo, ntohs(pptr[0]), ntohs(pptr[1]));
-}
-
-static struct ipt_match multiport_match = {
- .name = "multiport",
- .revision = 0,
- .match = match,
- .matchsize = sizeof(struct ipt_multiport),
- .me = THIS_MODULE,
-};
-
-static struct ipt_match multiport_match_v1 = {
- .name = "multiport",
- .revision = 1,
- .match = match_v1,
- .matchsize = sizeof(struct ipt_multiport_v1),
- .me = THIS_MODULE,
-};
-
-static int __init ipt_multiport_init(void)
-{
- int err;
-
- err = ipt_register_match(&multiport_match);
- if (!err) {
- err = ipt_register_match(&multiport_match_v1);
- if (err)
- ipt_unregister_match(&multiport_match);
- }
-
- return err;
-}
-
-static void __exit ipt_multiport_fini(void)
-{
- ipt_unregister_match(&multiport_match);
- ipt_unregister_match(&multiport_match_v1);
-}
-
-module_init(ipt_multiport_init);
-module_exit(ipt_multiport_fini);
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 850d919591d1c..e1b8f4b90d802 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -68,7 +68,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
{
int err;
u32 spi, seq;
- struct sec_decap_state xfrm_vec[XFRM_MAX_DEPTH];
+ struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
struct xfrm_state *x;
int xfrm_nr = 0;
int decaps = 0;
@@ -90,14 +90,16 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
if (unlikely(x->km.state != XFRM_STATE_VALID))
goto drop_unlock;
+ if (x->encap->encap_type != encap_type)
+ goto drop_unlock;
+
if (x->props.replay_window && xfrm_replay_check(x, seq))
goto drop_unlock;
if (xfrm_state_check_expire(x))
goto drop_unlock;
- xfrm_vec[xfrm_nr].decap.decap_type = encap_type;
- if (x->type->input(x, &(xfrm_vec[xfrm_nr].decap), skb))
+ if (x->type->input(x, skb))
goto drop_unlock;
/* only the first xfrm gets the encap type */
@@ -111,7 +113,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
spin_unlock(&x->lock);
- xfrm_vec[xfrm_nr++].xvec = x;
+ xfrm_vec[xfrm_nr++] = x;
iph = skb->nh.iph;
@@ -153,7 +155,8 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
goto drop;
- memcpy(skb->sp->x+skb->sp->len, xfrm_vec, xfrm_nr*sizeof(struct sec_decap_state));
+ memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
+ xfrm_nr * sizeof(xfrm_vec[0]));
skb->sp->len += xfrm_nr;
nf_reset(skb);
@@ -184,7 +187,7 @@ drop_unlock:
xfrm_state_put(x);
drop:
while (--xfrm_nr >= 0)
- xfrm_state_put(xfrm_vec[xfrm_nr].xvec);
+ xfrm_state_put(xfrm_vec[xfrm_nr]);
kfree_skb(skb);
return 0;
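
For orientation, a minimal sketch of walking the simplified sec_path introduced above; sp->len, sp->xvec[] and x->props.mode are taken from the hunks, while the helper itself is purely illustrative:

	#include <net/xfrm.h>

	/* Illustrative only: iterate the states attached to an skb's sec_path.
	 * sp->xvec[] now holds struct xfrm_state * directly, replacing the old
	 * sp->x[i].xvec indirection through struct sec_decap_state.
	 */
	static void dump_secpath_states(const struct sec_path *sp)
	{
		int i;

		for (i = 0; i < sp->len; i++) {
			const struct xfrm_state *x = sp->xvec[i];

			printk(KERN_DEBUG "xfrm[%d]: mode=%d\n", i, x->props.mode);
		}
	}
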
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 2d670935c2b5c..f8ceaa127c836 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -21,7 +21,7 @@ static int ipip_output(struct xfrm_state *x, struct sk_buff *skb)
return 0;
}
-static int ipip_xfrm_rcv(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
+static int ipip_xfrm_rcv(struct xfrm_state *x, struct sk_buff *skb)
{
return 0;
}
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index cf58251df4b32..6778173a3dda8 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -229,7 +229,7 @@ error:
return err;
}
-static int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
+static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
{
/*
* Before process AH
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 3dcaac7a09723..22f0460790375 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -130,7 +130,7 @@ error:
return err;
}
-static int esp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
+static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
struct ipv6hdr *iph;
struct ipv6_esp_hdr *esph;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index d4cfec3f414e3..00f3fadfcca7f 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -63,7 +63,7 @@ static void **ipcomp6_scratches;
static int ipcomp6_scratch_users;
static LIST_HEAD(ipcomp6_tfms_list);
-static int ipcomp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
+static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
int err = 0;
u8 nexthdr = 0;
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 98f78759f1abb..4bc4e5b337941 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -87,16 +87,6 @@ config IP6_NF_MATCH_HL
To compile it as a module, choose M here. If unsure, say N.
-config IP6_NF_MATCH_MULTIPORT
- tristate "Multiple port match support"
- depends on IP6_NF_IPTABLES
- help
- Multiport matching allows you to match TCP or UDP packets based on
- a series of source or destination ports: normally a rule can only
- match a single range of ports.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP6_NF_MATCH_OWNER
tristate "Owner match support"
depends on IP6_NF_IPTABLES
@@ -115,11 +105,11 @@ config IP6_NF_MATCH_IPV6HEADER
To compile it as a module, choose M here. If unsure, say N.
-config IP6_NF_MATCH_AHESP
- tristate "AH/ESP match support"
+config IP6_NF_MATCH_AH
+ tristate "AH match support"
depends on IP6_NF_IPTABLES
help
- This module allows one to match AH and ESP packets.
+ This module allows one to match AH packets.
To compile it as a module, choose M here. If unsure, say N.
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 8436a1a1731f4..eeeb57d4c9c55 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -8,9 +8,8 @@ obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o
obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o
-obj-$(CONFIG_IP6_NF_MATCH_AHESP) += ip6t_esp.o ip6t_ah.o
+obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
-obj-$(CONFIG_IP6_NF_MATCH_MULTIPORT) += ip6t_multiport.o
obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o
obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
diff --git a/net/ipv6/netfilter/ip6t_esp.c b/net/ipv6/netfilter/ip6t_esp.c
deleted file mode 100644
index 36bedad2c6f74..0000000000000
--- a/net/ipv6/netfilter/ip6t_esp.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/* Kernel module to match ESP parameters. */
-/* (C) 2001-2002 Andras Kis-Szabo <kisza@sch.bme.hu>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/types.h>
-#include <net/checksum.h>
-#include <net/ipv6.h>
-
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#include <linux/netfilter_ipv6/ip6t_esp.h>
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("IPv6 ESP match");
-MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-/* Returns 1 if the spi is matched by the range, 0 otherwise */
-static inline int
-spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert)
-{
- int r=0;
- DEBUGP("esp spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
- min,spi,max);
- r=(spi >= min && spi <= max) ^ invert;
- DEBUGP(" result %s\n",r? "PASS\n" : "FAILED\n");
- return r;
-}
-
-static int
-match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const struct xt_match *match,
- const void *matchinfo,
- int offset,
- unsigned int protoff,
- int *hotdrop)
-{
- struct ip_esp_hdr _esp, *eh;
- const struct ip6t_esp *espinfo = matchinfo;
- unsigned int ptr;
-
- /* Make sure this isn't an evil packet */
- /*DEBUGP("ipv6_esp entered \n");*/
-
- if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ESP, NULL) < 0)
- return 0;
-
- eh = skb_header_pointer(skb, ptr, sizeof(_esp), &_esp);
- if (eh == NULL) {
- *hotdrop = 1;
- return 0;
- }
-
- DEBUGP("IPv6 ESP SPI %u %08X\n", ntohl(eh->spi), ntohl(eh->spi));
-
- return (eh != NULL)
- && spi_match(espinfo->spis[0], espinfo->spis[1],
- ntohl(eh->spi),
- !!(espinfo->invflags & IP6T_ESP_INV_SPI));
-}
-
-/* Called when user tries to insert an entry of this type. */
-static int
-checkentry(const char *tablename,
- const void *ip,
- const struct xt_match *match,
- void *matchinfo,
- unsigned int matchinfosize,
- unsigned int hook_mask)
-{
- const struct ip6t_esp *espinfo = matchinfo;
-
- if (espinfo->invflags & ~IP6T_ESP_INV_MASK) {
- DEBUGP("ip6t_esp: unknown flags %X\n",
- espinfo->invflags);
- return 0;
- }
- return 1;
-}
-
-static struct ip6t_match esp_match = {
- .name = "esp",
- .match = match,
- .matchsize = sizeof(struct ip6t_esp),
- .checkentry = checkentry,
- .me = THIS_MODULE,
-};
-
-static int __init ip6t_esp_init(void)
-{
- return ip6t_register_match(&esp_match);
-}
-
-static void __exit ip6t_esp_fini(void)
-{
- ip6t_unregister_match(&esp_match);
-}
-
-module_init(ip6t_esp_init);
-module_exit(ip6t_esp_fini);
diff --git a/net/ipv6/netfilter/ip6t_multiport.c b/net/ipv6/netfilter/ip6t_multiport.c
deleted file mode 100644
index 10c48ba596d64..0000000000000
--- a/net/ipv6/netfilter/ip6t_multiport.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/* Kernel module to match one of a list of TCP/UDP ports: ports are in
- the same place so we can treat them as equal. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/udp.h>
-#include <linux/skbuff.h>
-#include <linux/in.h>
-
-#include <linux/netfilter_ipv6/ip6t_multiport.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
-MODULE_DESCRIPTION("ip6tables match for multiple ports");
-
-#if 0
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
-/* Returns 1 if the port is matched by the test, 0 otherwise. */
-static inline int
-ports_match(const u_int16_t *portlist, enum ip6t_multiport_flags flags,
- u_int8_t count, u_int16_t src, u_int16_t dst)
-{
- unsigned int i;
- for (i=0; i<count; i++) {
- if (flags != IP6T_MULTIPORT_DESTINATION
- && portlist[i] == src)
- return 1;
-
- if (flags != IP6T_MULTIPORT_SOURCE
- && portlist[i] == dst)
- return 1;
- }
-
- return 0;
-}
-
-static int
-match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const struct xt_match *match,
- const void *matchinfo,
- int offset,
- unsigned int protoff,
- int *hotdrop)
-{
- u16 _ports[2], *pptr;
- const struct ip6t_multiport *multiinfo = matchinfo;
-
- /* Must not be a fragment. */
- if (offset)
- return 0;
-
- /* Must be big enough to read ports (both UDP and TCP have
- them at the start). */
- pptr = skb_header_pointer(skb, protoff, sizeof(_ports), &_ports[0]);
- if (pptr == NULL) {
- /* We've been asked to examine this packet, and we
- * can't. Hence, no choice but to drop.
- */
- duprintf("ip6t_multiport:"
- " Dropping evil offset=0 tinygram.\n");
- *hotdrop = 1;
- return 0;
- }
-
- return ports_match(multiinfo->ports,
- multiinfo->flags, multiinfo->count,
- ntohs(pptr[0]), ntohs(pptr[1]));
-}
-
-/* Called when user tries to insert an entry of this type. */
-static int
-checkentry(const char *tablename,
- const void *info,
- const struct xt_match *match,
- void *matchinfo,
- unsigned int matchsize,
- unsigned int hook_mask)
-{
- const struct ip6t_ip6 *ip = info;
- const struct ip6t_multiport *multiinfo = matchinfo;
-
- /* Must specify proto == TCP/UDP, no unknown flags or bad count */
- return (ip->proto == IPPROTO_TCP || ip->proto == IPPROTO_UDP)
- && !(ip->invflags & IP6T_INV_PROTO)
- && (multiinfo->flags == IP6T_MULTIPORT_SOURCE
- || multiinfo->flags == IP6T_MULTIPORT_DESTINATION
- || multiinfo->flags == IP6T_MULTIPORT_EITHER)
- && multiinfo->count <= IP6T_MULTI_PORTS;
-}
-
-static struct ip6t_match multiport_match = {
- .name = "multiport",
- .match = match,
- .matchsize = sizeof(struct ip6t_multiport),
- .checkentry = checkentry,
- .me = THIS_MODULE,
-};
-
-static int __init ip6t_multiport_init(void)
-{
- return ip6t_register_match(&multiport_match);
-}
-
-static void __exit ip6t_multiport_fini(void)
-{
- ip6t_unregister_match(&multiport_match);
-}
-
-module_init(ip6t_multiport_init);
-module_exit(ip6t_multiport_fini);
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index cccf8b76f0467..00cfdee18dcac 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -32,7 +32,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, u32 spi)
{
int err;
u32 seq;
- struct sec_decap_state xfrm_vec[XFRM_MAX_DEPTH];
+ struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
struct xfrm_state *x;
int xfrm_nr = 0;
int decaps = 0;
@@ -65,7 +65,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, u32 spi)
if (xfrm_state_check_expire(x))
goto drop_unlock;
- nexthdr = x->type->input(x, &(xfrm_vec[xfrm_nr].decap), skb);
+ nexthdr = x->type->input(x, skb);
if (nexthdr <= 0)
goto drop_unlock;
@@ -79,7 +79,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, u32 spi)
spin_unlock(&x->lock);
- xfrm_vec[xfrm_nr++].xvec = x;
+ xfrm_vec[xfrm_nr++] = x;
if (x->props.mode) { /* XXX */
if (nexthdr != IPPROTO_IPV6)
@@ -118,7 +118,8 @@ int xfrm6_rcv_spi(struct sk_buff *skb, u32 spi)
if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
goto drop;
- memcpy(skb->sp->x+skb->sp->len, xfrm_vec, xfrm_nr*sizeof(struct sec_decap_state));
+ memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
+ xfrm_nr * sizeof(xfrm_vec[0]));
skb->sp->len += xfrm_nr;
skb->ip_summed = CHECKSUM_NONE;
@@ -149,7 +150,7 @@ drop_unlock:
xfrm_state_put(x);
drop:
while (--xfrm_nr >= 0)
- xfrm_state_put(xfrm_vec[xfrm_nr].xvec);
+ xfrm_state_put(xfrm_vec[xfrm_nr]);
kfree_skb(skb);
return -1;
}
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index a8f6776c518d8..d37768e5064f8 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -351,7 +351,7 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
return 0;
}
-static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
+static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
return 0;
}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 332acb37b3855..e2893effdfaae 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -231,6 +231,15 @@ config NETFILTER_XT_MATCH_DCCP
If you want to compile it as a module, say M here and read
<file:Documentation/modules.txt>. If unsure, say `N'.
+config NETFILTER_XT_MATCH_ESP
+ tristate '"ESP" match support'
+ depends on NETFILTER_XTABLES
+ help
+ This match extension allows you to match a range of SPIs
+ inside the ESP header of IPSec packets.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_HELPER
tristate '"helper" match support'
depends on NETFILTER_XTABLES
@@ -289,6 +298,16 @@ config NETFILTER_XT_MATCH_POLICY
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_MULTIPORT
+ tristate "Multiple port match support"
+ depends on NETFILTER_XTABLES
+ help
+ Multiport matching allows you to match TCP or UDP packets based on
+ a series of source or destination ports: normally a rule can only
+ match a single range of ports.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_PHYSDEV
tristate '"physdev" match support'
depends on NETFILTER_XTABLES && BRIDGE_NETFILTER
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 9558727f5e794..95b7e416512db 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -35,11 +35,13 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CONNMARK) += xt_connmark.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o
obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0e0e9d7b34c80..bd10eb944b656 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1022,7 +1022,7 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nfattr *cda[])
return err;
}
-#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
+#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK-1])
ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1]));
#endif
@@ -1062,7 +1062,7 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
return err;
}
-#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
+#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK-1])
ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1]));
#endif
@@ -1687,7 +1687,7 @@ static void __exit ctnetlink_exit(void)
printk("ctnetlink: unregistering from nfnetlink.\n");
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- nf_conntrack_unregister_notifier(&ctnl_notifier_exp);
+ nf_conntrack_expect_unregister_notifier(&ctnl_notifier_exp);
nf_conntrack_unregister_notifier(&ctnl_notifier);
#endif
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index a657ab5394c35..feb8a9e066b08 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -38,6 +38,7 @@ struct xt_af {
struct list_head match;
struct list_head target;
struct list_head tables;
+ struct mutex compat_mutex;
};
static struct xt_af *xt;
@@ -272,6 +273,54 @@ int xt_check_match(const struct xt_match *match, unsigned short family,
}
EXPORT_SYMBOL_GPL(xt_check_match);
+#ifdef CONFIG_COMPAT
+int xt_compat_match(void *match, void **dstptr, int *size, int convert)
+{
+ struct xt_match *m;
+ struct compat_xt_entry_match *pcompat_m;
+ struct xt_entry_match *pm;
+ u_int16_t msize;
+ int off, ret;
+
+ ret = 0;
+ m = ((struct xt_entry_match *)match)->u.kernel.match;
+ off = XT_ALIGN(m->matchsize) - COMPAT_XT_ALIGN(m->matchsize);
+ switch (convert) {
+ case COMPAT_TO_USER:
+ pm = (struct xt_entry_match *)match;
+ msize = pm->u.user.match_size;
+ if (__copy_to_user(*dstptr, pm, msize)) {
+ ret = -EFAULT;
+ break;
+ }
+ msize -= off;
+ if (put_user(msize, (u_int16_t *)*dstptr))
+ ret = -EFAULT;
+ *size -= off;
+ *dstptr += msize;
+ break;
+ case COMPAT_FROM_USER:
+ pcompat_m = (struct compat_xt_entry_match *)match;
+ pm = (struct xt_entry_match *)*dstptr;
+ msize = pcompat_m->u.user.match_size;
+ memcpy(pm, pcompat_m, msize);
+ msize += off;
+ pm->u.user.match_size = msize;
+ *size += off;
+ *dstptr += msize;
+ break;
+ case COMPAT_CALC_SIZE:
+ *size += off;
+ break;
+ default:
+ ret = -ENOPROTOOPT;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xt_compat_match);
+#endif
+
int xt_check_target(const struct xt_target *target, unsigned short family,
unsigned int size, const char *table, unsigned int hook_mask,
unsigned short proto, int inv_proto)
@@ -301,6 +350,54 @@ int xt_check_target(const struct xt_target *target, unsigned short family,
}
EXPORT_SYMBOL_GPL(xt_check_target);
+#ifdef CONFIG_COMPAT
+int xt_compat_target(void *target, void **dstptr, int *size, int convert)
+{
+ struct xt_target *t;
+ struct compat_xt_entry_target *pcompat;
+ struct xt_entry_target *pt;
+ u_int16_t tsize;
+ int off, ret;
+
+ ret = 0;
+ t = ((struct xt_entry_target *)target)->u.kernel.target;
+ off = XT_ALIGN(t->targetsize) - COMPAT_XT_ALIGN(t->targetsize);
+ switch (convert) {
+ case COMPAT_TO_USER:
+ pt = (struct xt_entry_target *)target;
+ tsize = pt->u.user.target_size;
+ if (__copy_to_user(*dstptr, pt, tsize)) {
+ ret = -EFAULT;
+ break;
+ }
+ tsize -= off;
+ if (put_user(tsize, (u_int16_t *)*dstptr))
+ ret = -EFAULT;
+ *size -= off;
+ *dstptr += tsize;
+ break;
+ case COMPAT_FROM_USER:
+ pcompat = (struct compat_xt_entry_target *)target;
+ pt = (struct xt_entry_target *)*dstptr;
+ tsize = pcompat->u.user.target_size;
+ memcpy(pt, pcompat, tsize);
+ tsize += off;
+ pt->u.user.target_size = tsize;
+ *size += off;
+ *dstptr += tsize;
+ break;
+ case COMPAT_CALC_SIZE:
+ *size += off;
+ break;
+ default:
+ ret = -ENOPROTOOPT;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xt_compat_target);
+#endif
+
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
struct xt_table_info *newinfo;
@@ -371,6 +468,19 @@ void xt_table_unlock(struct xt_table *table)
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
+#ifdef CONFIG_COMPAT
+void xt_compat_lock(int af)
+{
+ mutex_lock(&xt[af].compat_mutex);
+}
+EXPORT_SYMBOL_GPL(xt_compat_lock);
+
+void xt_compat_unlock(int af)
+{
+ mutex_unlock(&xt[af].compat_mutex);
+}
+EXPORT_SYMBOL_GPL(xt_compat_unlock);
+#endif
struct xt_table_info *
xt_replace_table(struct xt_table *table,
@@ -671,6 +781,9 @@ static int __init xt_init(void)
for (i = 0; i < NPROTO; i++) {
mutex_init(&xt[i].mutex);
+#ifdef CONFIG_COMPAT
+ mutex_init(&xt[i].compat_mutex);
+#endif
INIT_LIST_HEAD(&xt[i].target);
INIT_LIST_HEAD(&xt[i].match);
INIT_LIST_HEAD(&xt[i].tables);
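
A minimal sketch of how a caller might drive the new compat helpers; xt_compat_match(), xt_compat_lock()/xt_compat_unlock() and the COMPAT_* constants come from the hunks above, and the wrapper function is hypothetical:

	#include <linux/netfilter/x_tables.h>

	/* Illustrative only: compute the 32-bit userland size adjustment for one
	 * match entry.  Assumes m->u.kernel.match has already been resolved;
	 * COMPAT_CALC_SIZE only updates *size by the alignment delta, so no
	 * destination buffer is needed.
	 */
	static int example_compat_match_size(struct xt_entry_match *m, int *size)
	{
		int ret;

		xt_compat_lock(AF_INET);
		ret = xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
		xt_compat_unlock(AF_INET);

		return ret;
	}
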
diff --git a/net/ipv4/netfilter/ipt_esp.c b/net/netfilter/xt_esp.c
index 3840b417a3c51..9dad6281e0c10 100644
--- a/net/ipv4/netfilter/ipt_esp.c
+++ b/net/netfilter/xt_esp.c
@@ -9,16 +9,22 @@
#include <linux/module.h>
#include <linux/skbuff.h>
+#include <linux/in.h>
#include <linux/ip.h>
-#include <linux/netfilter_ipv4/ipt_esp.h>
+#include <linux/netfilter/xt_esp.h>
+#include <linux/netfilter/x_tables.h>
+
#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>");
-MODULE_DESCRIPTION("iptables ESP SPI match module");
+MODULE_DESCRIPTION("x_tables ESP SPI match module");
+MODULE_ALIAS("ipt_esp");
+MODULE_ALIAS("ip6t_esp");
-#ifdef DEBUG_CONNTRACK
+#if 0
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
@@ -28,11 +34,11 @@ MODULE_DESCRIPTION("iptables ESP SPI match module");
static inline int
spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert)
{
- int r=0;
- duprintf("esp spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
- min,spi,max);
- r=(spi >= min && spi <= max) ^ invert;
- duprintf(" result %s\n",r? "PASS" : "FAILED");
+ int r = 0;
+ duprintf("esp spi_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ',
+ min, spi, max);
+ r = (spi >= min && spi <= max) ^ invert;
+ duprintf(" result %s\n", r ? "PASS" : "FAILED");
return r;
}
@@ -47,14 +53,13 @@ match(const struct sk_buff *skb,
int *hotdrop)
{
struct ip_esp_hdr _esp, *eh;
- const struct ipt_esp *espinfo = matchinfo;
+ const struct xt_esp *espinfo = matchinfo;
/* Must not be a fragment. */
if (offset)
return 0;
- eh = skb_header_pointer(skb, protoff,
- sizeof(_esp), &_esp);
+ eh = skb_header_pointer(skb, protoff, sizeof(_esp), &_esp);
if (eh == NULL) {
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
@@ -64,9 +69,8 @@ match(const struct sk_buff *skb,
return 0;
}
- return spi_match(espinfo->spis[0], espinfo->spis[1],
- ntohl(eh->spi),
- !!(espinfo->invflags & IPT_ESP_INV_SPI));
+ return spi_match(espinfo->spis[0], espinfo->spis[1], ntohl(eh->spi),
+ !!(espinfo->invflags & XT_ESP_INV_SPI));
}
/* Called when user tries to insert an entry of this type. */
@@ -78,34 +82,55 @@ checkentry(const char *tablename,
unsigned int matchinfosize,
unsigned int hook_mask)
{
- const struct ipt_esp *espinfo = matchinfo;
+ const struct xt_esp *espinfo = matchinfo;
- /* Must specify no unknown invflags */
- if (espinfo->invflags & ~IPT_ESP_INV_MASK) {
- duprintf("ipt_esp: unknown flags %X\n", espinfo->invflags);
+ if (espinfo->invflags & ~XT_ESP_INV_MASK) {
+ duprintf("xt_esp: unknown flags %X\n", espinfo->invflags);
return 0;
}
+
return 1;
}
-static struct ipt_match esp_match = {
+static struct xt_match esp_match = {
.name = "esp",
- .match = match,
- .matchsize = sizeof(struct ipt_esp),
+ .family = AF_INET,
.proto = IPPROTO_ESP,
- .checkentry = checkentry,
+ .match = &match,
+ .matchsize = sizeof(struct xt_esp),
+ .checkentry = &checkentry,
.me = THIS_MODULE,
};
-static int __init ipt_esp_init(void)
+static struct xt_match esp6_match = {
+ .name = "esp",
+ .family = AF_INET6,
+ .proto = IPPROTO_ESP,
+ .match = &match,
+ .matchsize = sizeof(struct xt_esp),
+ .checkentry = &checkentry,
+ .me = THIS_MODULE,
+};
+
+static int __init xt_esp_init(void)
{
- return ipt_register_match(&esp_match);
+ int ret;
+ ret = xt_register_match(&esp_match);
+ if (ret)
+ return ret;
+
+ ret = xt_register_match(&esp6_match);
+ if (ret)
+ xt_unregister_match(&esp_match);
+
+ return ret;
}
-static void __exit ipt_esp_fini(void)
+static void __exit xt_esp_cleanup(void)
{
- ipt_unregister_match(&esp_match);
+ xt_unregister_match(&esp_match);
+ xt_unregister_match(&esp6_match);
}
-module_init(ipt_esp_init);
-module_exit(ipt_esp_fini);
+module_init(xt_esp_init);
+module_exit(xt_esp_cleanup);
diff --git a/net/netfilter/xt_multiport.c b/net/netfilter/xt_multiport.c
new file mode 100644
index 0000000000000..b56cd2baaac22
--- /dev/null
+++ b/net/netfilter/xt_multiport.c
@@ -0,0 +1,314 @@
+/* Kernel module to match one of a list of TCP/UDP ports: ports are in
+ the same place so we can treat them as equal. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
+MODULE_DESCRIPTION("x_tables multiple port match module");
+MODULE_ALIAS("ipt_multiport");
+MODULE_ALIAS("ip6t_multiport");
+
+#if 0
+#define duprintf(format, args...) printk(format , ## args)
+#else
+#define duprintf(format, args...)
+#endif
+
+/* Returns 1 if the port is matched by the test, 0 otherwise. */
+static inline int
+ports_match(const u_int16_t *portlist, enum xt_multiport_flags flags,
+ u_int8_t count, u_int16_t src, u_int16_t dst)
+{
+ unsigned int i;
+ for (i = 0; i < count; i++) {
+ if (flags != XT_MULTIPORT_DESTINATION && portlist[i] == src)
+ return 1;
+
+ if (flags != XT_MULTIPORT_SOURCE && portlist[i] == dst)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Returns 1 if the port is matched by the test, 0 otherwise. */
+static inline int
+ports_match_v1(const struct xt_multiport_v1 *minfo,
+ u_int16_t src, u_int16_t dst)
+{
+ unsigned int i;
+ u_int16_t s, e;
+
+ for (i = 0; i < minfo->count; i++) {
+ s = minfo->ports[i];
+
+ if (minfo->pflags[i]) {
+ /* range port matching */
+ e = minfo->ports[++i];
+ duprintf("src or dst matches with %d-%d?\n", s, e);
+
+ if (minfo->flags == XT_MULTIPORT_SOURCE
+ && src >= s && src <= e)
+ return 1 ^ minfo->invert;
+ if (minfo->flags == XT_MULTIPORT_DESTINATION
+ && dst >= s && dst <= e)
+ return 1 ^ minfo->invert;
+ if (minfo->flags == XT_MULTIPORT_EITHER
+ && ((dst >= s && dst <= e)
+ || (src >= s && src <= e)))
+ return 1 ^ minfo->invert;
+ } else {
+ /* exact port matching */
+ duprintf("src or dst matches with %d?\n", s);
+
+ if (minfo->flags == XT_MULTIPORT_SOURCE
+ && src == s)
+ return 1 ^ minfo->invert;
+ if (minfo->flags == XT_MULTIPORT_DESTINATION
+ && dst == s)
+ return 1 ^ minfo->invert;
+ if (minfo->flags == XT_MULTIPORT_EITHER
+ && (src == s || dst == s))
+ return 1 ^ minfo->invert;
+ }
+ }
+
+ return minfo->invert;
+}
+
+static int
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct xt_match *match,
+ const void *matchinfo,
+ int offset,
+ unsigned int protoff,
+ int *hotdrop)
+{
+ u16 _ports[2], *pptr;
+ const struct xt_multiport *multiinfo = matchinfo;
+
+ if (offset)
+ return 0;
+
+ pptr = skb_header_pointer(skb, protoff, sizeof(_ports), _ports);
+ if (pptr == NULL) {
+ /* We've been asked to examine this packet, and we
+ * can't. Hence, no choice but to drop.
+ */
+ duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n");
+ *hotdrop = 1;
+ return 0;
+ }
+
+ return ports_match(multiinfo->ports,
+ multiinfo->flags, multiinfo->count,
+ ntohs(pptr[0]), ntohs(pptr[1]));
+}
+
+static int
+match_v1(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct xt_match *match,
+ const void *matchinfo,
+ int offset,
+ unsigned int protoff,
+ int *hotdrop)
+{
+ u16 _ports[2], *pptr;
+ const struct xt_multiport_v1 *multiinfo = matchinfo;
+
+ if (offset)
+ return 0;
+
+ pptr = skb_header_pointer(skb, protoff, sizeof(_ports), _ports);
+ if (pptr == NULL) {
+ /* We've been asked to examine this packet, and we
+ * can't. Hence, no choice but to drop.
+ */
+ duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n");
+ *hotdrop = 1;
+ return 0;
+ }
+
+ return ports_match_v1(multiinfo, ntohs(pptr[0]), ntohs(pptr[1]));
+}
+
+static inline int
+check(u_int16_t proto,
+ u_int8_t ip_invflags,
+ u_int8_t match_flags,
+ u_int8_t count)
+{
+ /* Must specify proto == TCP/UDP, no unknown flags or bad count */
+ return (proto == IPPROTO_TCP || proto == IPPROTO_UDP)
+ && !(ip_invflags & XT_INV_PROTO)
+ && (match_flags == XT_MULTIPORT_SOURCE
+ || match_flags == XT_MULTIPORT_DESTINATION
+ || match_flags == XT_MULTIPORT_EITHER)
+ && count <= XT_MULTI_PORTS;
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+checkentry(const char *tablename,
+ const void *info,
+ const struct xt_match *match,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ const struct ipt_ip *ip = info;
+ const struct xt_multiport *multiinfo = matchinfo;
+
+ return check(ip->proto, ip->invflags, multiinfo->flags,
+ multiinfo->count);
+}
+
+static int
+checkentry_v1(const char *tablename,
+ const void *info,
+ const struct xt_match *match,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ const struct ipt_ip *ip = info;
+ const struct xt_multiport_v1 *multiinfo = matchinfo;
+
+ return check(ip->proto, ip->invflags, multiinfo->flags,
+ multiinfo->count);
+}
+
+static int
+checkentry6(const char *tablename,
+ const void *info,
+ const struct xt_match *match,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ const struct ip6t_ip6 *ip = info;
+ const struct xt_multiport *multiinfo = matchinfo;
+
+ return check(ip->proto, ip->invflags, multiinfo->flags,
+ multiinfo->count);
+}
+
+static int
+checkentry6_v1(const char *tablename,
+ const void *info,
+ const struct xt_match *match,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ const struct ip6t_ip6 *ip = info;
+ const struct xt_multiport_v1 *multiinfo = matchinfo;
+
+ return check(ip->proto, ip->invflags, multiinfo->flags,
+ multiinfo->count);
+}
+
+static struct xt_match multiport_match = {
+ .name = "multiport",
+ .revision = 0,
+ .matchsize = sizeof(struct xt_multiport),
+ .match = &match,
+ .checkentry = &checkentry,
+ .family = AF_INET,
+ .me = THIS_MODULE,
+};
+
+static struct xt_match multiport_match_v1 = {
+ .name = "multiport",
+ .revision = 1,
+ .matchsize = sizeof(struct xt_multiport_v1),
+ .match = &match_v1,
+ .checkentry = &checkentry_v1,
+ .family = AF_INET,
+ .me = THIS_MODULE,
+};
+
+static struct xt_match multiport6_match = {
+ .name = "multiport",
+ .revision = 0,
+ .matchsize = sizeof(struct xt_multiport),
+ .match = &match,
+ .checkentry = &checkentry6,
+ .family = AF_INET6,
+ .me = THIS_MODULE,
+};
+
+static struct xt_match multiport6_match_v1 = {
+ .name = "multiport",
+ .revision = 1,
+ .matchsize = sizeof(struct xt_multiport_v1),
+ .match = &match_v1,
+ .checkentry = &checkentry6_v1,
+ .family = AF_INET6,
+ .me = THIS_MODULE,
+};
+
+static int __init xt_multiport_init(void)
+{
+ int ret;
+
+ ret = xt_register_match(&multiport_match);
+ if (ret)
+ goto out;
+
+ ret = xt_register_match(&multiport_match_v1);
+ if (ret)
+ goto out_unreg_multi_v0;
+
+ ret = xt_register_match(&multiport6_match);
+ if (ret)
+ goto out_unreg_multi_v1;
+
+ ret = xt_register_match(&multiport6_match_v1);
+ if (ret)
+ goto out_unreg_multi6_v0;
+
+ return ret;
+
+out_unreg_multi6_v0:
+ xt_unregister_match(&multiport6_match);
+out_unreg_multi_v1:
+ xt_unregister_match(&multiport_match_v1);
+out_unreg_multi_v0:
+ xt_unregister_match(&multiport_match);
+out:
+ return ret;
+}
+
+static void __exit xt_multiport_fini(void)
+{
+ xt_unregister_match(&multiport_match);
+ xt_unregister_match(&multiport_match_v1);
+ xt_unregister_match(&multiport6_match);
+ xt_unregister_match(&multiport6_match_v1);
+}
+
+module_init(xt_multiport_init);
+module_exit(xt_multiport_fini);
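
A minimal sketch of how a revision 1 match could describe "destination ports 80,8000:8080"; the struct and field names come from ports_match_v1() above, and the values are made up for illustration:

	#include <linux/netfilter/xt_multiport.h>

	/* Illustrative only: a range occupies two consecutive slots in ports[]
	 * and sets pflags[] on the first of the pair, so count covers three
	 * slots here (80, then the 8000-8080 pair).
	 */
	static const struct xt_multiport_v1 example_minfo = {
		.flags	= XT_MULTIPORT_DESTINATION,
		.count	= 3,
		.ports	= { 80, 8000, 8080 },
		.pflags	= { 0, 1, 0 },
		.invert	= 0,
	};
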
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 1099cb005fcc3..a3aa62fbda6f1 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -71,7 +71,7 @@ match_policy_in(const struct sk_buff *skb, const struct xt_policy_info *info,
return 0;
e = &info->pol[pos];
- if (match_xfrm_state(sp->x[i].xvec, e, family)) {
+ if (match_xfrm_state(sp->xvec[i], e, family)) {
if (!strict)
return 1;
} else if (strict)
diff --git a/net/socket.c b/net/socket.c
index b13042f68c022..b807f360e02cc 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1418,7 +1418,8 @@ asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, int _
newfd = sock_alloc_fd(&newfile);
if (unlikely(newfd < 0)) {
err = newfd;
- goto out_release;
+ sock_release(newsock);
+ goto out_put;
}
err = sock_attach_fd(newsock, newfile);
@@ -1455,10 +1456,8 @@ out_put:
out:
return err;
out_fd:
- put_filp(newfile);
+ fput(newfile);
put_unused_fd(newfd);
-out_release:
- sock_release(newsock);
goto out_put;
}
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 2407a7072327f..b54971059f164 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -18,7 +18,7 @@ void __secpath_destroy(struct sec_path *sp)
{
int i;
for (i = 0; i < sp->len; i++)
- xfrm_state_put(sp->x[i].xvec);
+ xfrm_state_put(sp->xvec[i]);
kmem_cache_free(secpath_cachep, sp);
}
EXPORT_SYMBOL(__secpath_destroy);
@@ -37,7 +37,7 @@ struct sec_path *secpath_dup(struct sec_path *src)
memcpy(sp, src, sizeof(*sp));
for (i = 0; i < sp->len; i++)
- xfrm_state_hold(sp->x[i].xvec);
+ xfrm_state_hold(sp->xvec[i]);
}
atomic_set(&sp->refcnt, 1);
return sp;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f5eae9febd26a..c3725fe2a8fba 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -943,9 +943,9 @@ xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
} else
start = -1;
for (; idx < sp->len; idx++) {
- if (xfrm_state_ok(tmpl, sp->x[idx].xvec, family))
+ if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
return ++idx;
- if (sp->x[idx].xvec->props.mode)
+ if (sp->xvec[idx]->props.mode)
break;
}
return start;
@@ -968,7 +968,7 @@ EXPORT_SYMBOL(xfrm_decode_session);
static inline int secpath_has_tunnel(struct sec_path *sp, int k)
{
for (; k < sp->len; k++) {
- if (sp->x[k].xvec->props.mode)
+ if (sp->xvec[k]->props.mode)
return 1;
}
@@ -994,8 +994,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
int i;
for (i=skb->sp->len-1; i>=0; i--) {
- struct sec_decap_state *xvec = &(skb->sp->x[i]);
- if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family))
+ struct xfrm_state *x = skb->sp->xvec[i];
+ if (!xfrm_selector_match(&x->sel, &fl, family))
return 0;
}
}
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index 77caf43a31090..adfdce7499d1c 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -57,18 +57,12 @@ static struct snd_card *card_list[SNDRV_CARDS];
/*
* prototypes
*/
-static void pdacf_config(dev_link_t *link);
+static int pdacf_config(struct pcmcia_device *link);
static void snd_pdacf_detach(struct pcmcia_device *p_dev);
-static void pdacf_release(dev_link_t *link)
+static void pdacf_release(struct pcmcia_device *link)
{
- if (link->state & DEV_CONFIG) {
- /* release cs resources */
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
- }
+ pcmcia_disable_device(link);
}
/*
@@ -76,7 +70,7 @@ static void pdacf_release(dev_link_t *link)
*/
static int snd_pdacf_free(struct snd_pdacf *pdacf)
{
- dev_link_t *link = &pdacf->link;
+ struct pcmcia_device *link = pdacf->p_dev;
pdacf_release(link);
@@ -96,10 +90,9 @@ static int snd_pdacf_dev_free(struct snd_device *device)
/*
* snd_pdacf_attach - attach callback for cs
*/
-static int snd_pdacf_attach(struct pcmcia_device *p_dev)
+static int snd_pdacf_probe(struct pcmcia_device *link)
{
int i;
- dev_link_t *link; /* Info for cardmgr */
struct snd_pdacf *pdacf;
struct snd_card *card;
static struct snd_device_ops ops = {
@@ -139,7 +132,7 @@ static int snd_pdacf_attach(struct pcmcia_device *p_dev)
pdacf->index = i;
card_list[i] = card;
- link = &pdacf->link;
+ pdacf->p_dev = link;
link->priv = pdacf;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
@@ -156,13 +149,7 @@ static int snd_pdacf_attach(struct pcmcia_device *p_dev)
link->conf.ConfigIndex = 1;
link->conf.Present = PRESENT_OPTION;
- /* Chain drivers */
- link->next = NULL;
-
- link->handle = p_dev;
- pdacf_config(link);
-
- return 0;
+ return pdacf_config(link);
}
@@ -209,9 +196,8 @@ static int snd_pdacf_assign_resources(struct snd_pdacf *pdacf, int port, int irq
/*
* snd_pdacf_detach - detach callback for cs
*/
-static void snd_pdacf_detach(struct pcmcia_device *p_dev)
+static void snd_pdacf_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct snd_pdacf *chip = link->priv;
snd_printdd(KERN_DEBUG "pdacf_detach called\n");
@@ -230,13 +216,11 @@ static void snd_pdacf_detach(struct pcmcia_device *p_dev)
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void pdacf_config(dev_link_t *link)
+static int pdacf_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
struct snd_pdacf *pdacf = link->priv;
tuple_t tuple;
cisparse_t *parse = NULL;
- config_info_t conf;
u_short buf[32];
int last_fn, last_ret;
@@ -244,7 +228,7 @@ static void pdacf_config(dev_link_t *link)
parse = kmalloc(sizeof(*parse), GFP_KERNEL);
if (! parse) {
snd_printk(KERN_ERR "pdacf_config: cannot allocate\n");
- return;
+ return -ENOMEM;
}
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
tuple.Attributes = 0;
@@ -252,71 +236,51 @@ static void pdacf_config(dev_link_t *link)
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
link->conf.ConfigBase = parse->config.base;
link->conf.ConfigIndex = 0x5;
kfree(parse);
- CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
- link->conf.Vcc = conf.Vcc;
-
- /* Configure card */
- link->state |= DEV_CONFIG;
-
- CS_CHECK(RequestIO, pcmcia_request_io(handle, &link->io));
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
if (snd_pdacf_assign_resources(pdacf, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
goto failed;
- link->dev = &pdacf->node;
- link->state &= ~DEV_CONFIG_PENDING;
- return;
+ link->dev_node = &pdacf->node;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
+ pcmcia_disable_device(link);
+ return -ENODEV;
}
#ifdef CONFIG_PM
-static int pdacf_suspend(struct pcmcia_device *dev)
+static int pdacf_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
struct snd_pdacf *chip = link->priv;
snd_printdd(KERN_DEBUG "SUSPEND\n");
- link->state |= DEV_SUSPEND;
if (chip) {
snd_printdd(KERN_DEBUG "snd_pdacf_suspend calling\n");
snd_pdacf_suspend(chip, PMSG_SUSPEND);
}
- snd_printdd(KERN_DEBUG "RESET_PHYSICAL\n");
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
-
return 0;
}
-static int pdacf_resume(struct pcmcia_device *dev)
+static int pdacf_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
struct snd_pdacf *chip = link->priv;
snd_printdd(KERN_DEBUG "RESUME\n");
- link->state &= ~DEV_SUSPEND;
-
- snd_printdd(KERN_DEBUG "CARD_RESET\n");
- if (DEV_OK(link)) {
- snd_printdd(KERN_DEBUG "requestconfig...\n");
- pcmcia_request_configuration(link->handle, &link->conf);
+ if (pcmcia_dev_present(link)) {
if (chip) {
snd_printdd(KERN_DEBUG "calling snd_pdacf_resume\n");
snd_pdacf_resume(chip);
@@ -343,7 +307,7 @@ static struct pcmcia_driver pdacf_cs_driver = {
.drv = {
.name = "snd-pdaudiocf",
},
- .probe = snd_pdacf_attach,
+ .probe = snd_pdacf_probe,
.remove = snd_pdacf_detach,
.id_table = snd_pdacf_ids,
#ifdef CONFIG_PM
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.h b/sound/pcmcia/pdaudiocf/pdaudiocf.h
index 2744f189a6132..9a14a4f64bd32 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.h
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.h
@@ -116,7 +116,7 @@ struct snd_pdacf {
void *pcm_area;
/* pcmcia stuff */
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
};
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 66900d20a42f3..7e0cda2b6ef9a 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -59,15 +59,9 @@ static unsigned int card_alloc;
/*
*/
-static void vxpocket_release(dev_link_t *link)
+static void vxpocket_release(struct pcmcia_device *link)
{
- if (link->state & DEV_CONFIG) {
- /* release cs resources */
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
- }
+ pcmcia_disable_device(link);
}
/*
@@ -132,9 +126,9 @@ static struct snd_vx_hardware vxp440_hw = {
/*
* create vxpocket instance
*/
-static struct snd_vxpocket *snd_vxpocket_new(struct snd_card *card, int ibl)
+static struct snd_vxpocket *snd_vxpocket_new(struct snd_card *card, int ibl,
+ struct pcmcia_device *link)
{
- dev_link_t *link; /* Info for cardmgr */
struct vx_core *chip;
struct snd_vxpocket *vxp;
static struct snd_device_ops ops = {
@@ -154,7 +148,7 @@ static struct snd_vxpocket *snd_vxpocket_new(struct snd_card *card, int ibl)
vxp = (struct snd_vxpocket *)chip;
- link = &vxp->link;
+ vxp->p_dev = link;
link->priv = chip;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
@@ -167,7 +161,6 @@ static struct snd_vxpocket *snd_vxpocket_new(struct snd_card *card, int ibl)
link->irq.Instance = chip;
link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
link->conf.Present = PRESENT_OPTION;
@@ -215,9 +208,8 @@ static int snd_vxpocket_assign_resources(struct vx_core *chip, int port, int irq
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-static void vxpocket_config(dev_link_t *link)
+static int vxpocket_config(struct pcmcia_device *link)
{
- client_handle_t handle = link->handle;
struct vx_core *chip = link->priv;
struct snd_vxpocket *vxp = (struct snd_vxpocket *)chip;
tuple_t tuple;
@@ -229,24 +221,24 @@ static void vxpocket_config(dev_link_t *link)
parse = kmalloc(sizeof(*parse), GFP_KERNEL);
if (! parse) {
snd_printk(KERN_ERR "vx: cannot allocate\n");
- return;
+ return -ENOMEM;
}
tuple.Attributes = 0;
tuple.TupleData = (cisdata_t *)buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
link->conf.ConfigBase = parse->config.base;
link->conf.Present = parse->config.rmask[0];
/* redefine hardware record according to the VERSION1 string */
tuple.DesiredTuple = CISTPL_VERS_1;
- CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
- CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
- CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, parse));
+ CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+ CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+ CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
if (! strcmp(parse->version_1.str + parse->version_1.ofs[1], "VX-POCKET")) {
snd_printdd("VX-pocket is detected\n");
} else {
@@ -257,67 +249,50 @@ static void vxpocket_config(dev_link_t *link)
strcpy(chip->card->driver, vxp440_hw.name);
}
- /* Configure card */
- link->state |= DEV_CONFIG;
-
- CS_CHECK(RequestIO, pcmcia_request_io(handle, &link->io));
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+ CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
- chip->dev = &handle_to_dev(link->handle);
+ chip->dev = &handle_to_dev(link);
snd_card_set_dev(chip->card, chip->dev);
if (snd_vxpocket_assign_resources(chip, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
goto failed;
- link->dev = &vxp->node;
- link->state &= ~DEV_CONFIG_PENDING;
+ link->dev_node = &vxp->node;
kfree(parse);
- return;
+ return 0;
cs_failed:
- cs_error(link->handle, last_fn, last_ret);
+ cs_error(link, last_fn, last_ret);
failed:
- pcmcia_release_configuration(link->handle);
- pcmcia_release_io(link->handle, &link->io);
- pcmcia_release_irq(link->handle, &link->irq);
- link->state &= ~DEV_CONFIG;
+ pcmcia_disable_device(link);
kfree(parse);
+ return -ENODEV;
}
#ifdef CONFIG_PM
-static int vxp_suspend(struct pcmcia_device *dev)
+static int vxp_suspend(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
struct vx_core *chip = link->priv;
snd_printdd(KERN_DEBUG "SUSPEND\n");
- link->state |= DEV_SUSPEND;
if (chip) {
snd_printdd(KERN_DEBUG "snd_vx_suspend calling\n");
snd_vx_suspend(chip, PMSG_SUSPEND);
}
- snd_printdd(KERN_DEBUG "RESET_PHYSICAL\n");
- if (link->state & DEV_CONFIG)
- pcmcia_release_configuration(link->handle);
return 0;
}
-static int vxp_resume(struct pcmcia_device *dev)
+static int vxp_resume(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(dev);
struct vx_core *chip = link->priv;
snd_printdd(KERN_DEBUG "RESUME\n");
- link->state &= ~DEV_SUSPEND;
-
- snd_printdd(KERN_DEBUG "CARD_RESET\n");
- if (DEV_OK(link)) {
+ if (pcmcia_dev_present(link)) {
//struct snd_vxpocket *vxp = (struct snd_vxpocket *)chip;
- snd_printdd(KERN_DEBUG "requestconfig...\n");
- pcmcia_request_configuration(link->handle, &link->conf);
if (chip) {
snd_printdd(KERN_DEBUG "calling snd_vx_resume\n");
snd_vx_resume(chip);
@@ -333,7 +308,7 @@ static int vxp_resume(struct pcmcia_device *dev)
/*
*/
-static int vxpocket_attach(struct pcmcia_device *p_dev)
+static int vxpocket_probe(struct pcmcia_device *p_dev)
{
struct snd_card *card;
struct snd_vxpocket *vxp;
@@ -358,7 +333,7 @@ static int vxpocket_attach(struct pcmcia_device *p_dev)
return -ENOMEM;
}
- vxp = snd_vxpocket_new(card, ibl[i]);
+ vxp = snd_vxpocket_new(card, ibl[i], p_dev);
if (! vxp) {
snd_card_free(card);
return -ENODEV;
@@ -368,20 +343,13 @@ static int vxpocket_attach(struct pcmcia_device *p_dev)
vxp->index = i;
card_alloc |= 1 << i;
- /* Chain drivers */
- vxp->link.next = NULL;
-
- vxp->link.handle = p_dev;
- vxp->link.state |= DEV_PRESENT | DEV_CONFIG_PENDING;
- p_dev->instance = &vxp->link;
- vxpocket_config(&vxp->link);
+ vxp->p_dev = p_dev;
- return 0;
+ return vxpocket_config(p_dev);
}
-static void vxpocket_detach(struct pcmcia_device *p_dev)
+static void vxpocket_detach(struct pcmcia_device *link)
{
- dev_link_t *link = dev_to_instance(p_dev);
struct snd_vxpocket *vxp;
struct vx_core *chip;
@@ -413,7 +381,7 @@ static struct pcmcia_driver vxp_cs_driver = {
.drv = {
.name = "snd-vxpocket",
},
- .probe = vxpocket_attach,
+ .probe = vxpocket_probe,
.remove = vxpocket_detach,
.id_table = vxp_ids,
#ifdef CONFIG_PM
diff --git a/sound/pcmcia/vx/vxpocket.h b/sound/pcmcia/vx/vxpocket.h
index 67efae3f6c8d1..27ea002294c02 100644
--- a/sound/pcmcia/vx/vxpocket.h
+++ b/sound/pcmcia/vx/vxpocket.h
@@ -42,7 +42,7 @@ struct snd_vxpocket {
int index; /* card index */
/* pcmcia stuff */
- dev_link_t link;
+ struct pcmcia_device *p_dev;
dev_node_t node;
};