diff --git a/Documentation/hwlat_detector.txt b/Documentation/hwlat_detector.txt
new file mode 100644
index 0000000..cb61516
--- /dev/null
+++ b/Documentation/hwlat_detector.txt
@@ -0,0 +1,64 @@
+Introduction:
+-------------
+
+The module hwlat_detector is a special purpose kernel module that is used to
+detect large system latencies induced by the behavior of certain underlying
+hardware or firmware, independent of Linux itself. The code was developed
+originally to detect SMIs (System Management Interrupts) on x86 systems,
+however there is nothing x86-specific about this patchset. It was
+originally written for use by the "RT" patch since the Real Time
+kernel is highly latency-sensitive.
+
+SMIs are usually not serviced by the Linux kernel, which typically does not
+even know that they are occurring. SMIs are instead set up by BIOS code
+and are serviced by BIOS code, usually for "critical" events such as
+management of thermal sensors and fans. Sometimes though, SMIs are used for
+other tasks and those tasks can spend an inordinate amount of time in the
+handler (sometimes measured in milliseconds). Obviously this is a problem if
+you are trying to keep event service latencies down in the microsecond range.
+
+The hardware latency detector works by hogging all of the CPUs for configurable
+amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
+for some period, then looking for gaps in the TSC data. Any gap indicates a
+time when the polling was interrupted; since the machine is stopped and
+interrupts are turned off, the only thing that could do that is an SMI.
+
+Note that the SMI detector should *NEVER* be used in a production environment.
+It is intended to be run manually to determine if the hardware platform has a
+problem with long system firmware service routines.
+
+Usage:
+------
+
+Loading the module hwlat_detector with the parameter "enabled=1" (or setting
+the "enable" entry in the "hwlat_detector" debugfs directory to 1) is the only
+step required to start the hwlat_detector. It is possible to redefine the
+threshold in microseconds (us) above which latency spikes will be taken
+into account (parameter "threshold=").
+
+Example:
+
+	# modprobe hwlat_detector enabled=1 threshold=100
+
+After the module is loaded, it creates a directory named "hwlat_detector" under
+the debugfs mountpoint, "/debug/hwlat_detector" in this text. It is necessary
+to have debugfs mounted, which might be on /sys/debug on your system.
+
+The /debug/hwlat_detector interface contains the following files:
+
+count		- number of latency spikes observed since last reset
+enable		- a global enable/disable toggle (0/1), resets count
+max		- maximum hardware latency actually observed (usecs)
+sample		- a pipe from which to read current raw sample data
+		  in the format <timestamp> <latency observed usecs>
+		  (can be opened O_NONBLOCK for a single sample)
+threshold	- minimum latency value to be considered (usecs)
+width		- time period to sample with CPUs held (usecs)
+		  must be less than the total window size (enforced)
+window		- total period of sampling, width being inside (usecs)
+
+By default we will set width to 500,000 and window to 1,000,000, meaning that
+we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
+observe any latencies that exceed the threshold (initially 100 usecs),
+then we write to a global sample ring buffer of 8K samples, which is
+consumed by reading from the "sample" (pipe) debugfs file interface.
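
As an illustration of the "sample" interface described above, the following
user-space sketch reads one raw sample non-blocking. It is not part of the
patch; the "/debug/hwlat_detector/sample" path and the <timestamp> <latency>
record layout are assumptions taken from the text above, so adjust them to
match your debugfs mountpoint.

	/*
	 * Minimal sketch: open the hwlat_detector sample pipe with O_NONBLOCK
	 * and read a single raw sample. With no sample pending, read() fails
	 * with EAGAIN instead of blocking.
	 */
	#include <stdio.h>
	#include <string.h>
	#include <errno.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[128];
		ssize_t n;
		int fd = open("/debug/hwlat_detector/sample",
			      O_RDONLY | O_NONBLOCK);

		if (fd < 0) {
			fprintf(stderr, "open: %s\n", strerror(errno));
			return 1;
		}
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("%s", buf);	/* one raw sample record */
		} else if (n < 0 && errno != EAGAIN) {
			fprintf(stderr, "read: %s\n", strerror(errno));
		}
		close(fd);
		return 0;
	}

Run it with the module loaded and enabled; without O_NONBLOCK the same read
would block until the detector records the next latency sample.
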
diff --git a/Documentation/sound/alsa/Procfile.txt b/Documentation/sound/alsa/Procfile.txt index 381908d..719a819 100644 --- a/Documentation/sound/alsa/Procfile.txt +++ b/Documentation/sound/alsa/Procfile.txt @@ -101,6 +101,8 @@ card*/pcm*/xrun_debug bit 0 = Enable XRUN/jiffies debug messages bit 1 = Show stack trace at XRUN / jiffies check bit 2 = Enable additional jiffies check + bit 3 = Log hwptr update at each period interrupt + bit 4 = Log hwptr update at each snd_pcm_update_hw_ptr() When the bit 0 is set, the driver will show the messages to kernel log when an xrun is detected. The debug message is @@ -117,6 +119,9 @@ card*/pcm*/xrun_debug buggy) hardware that doesn't give smooth pointer updates. This feature is enabled via the bit 2. + Bits 3 and 4 are for logging the hwptr records. Note that + these will give flood of kernel messages. + card*/pcm*/sub*/info The general information of this PCM sub-stream. diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx index 014d255..68c236c 100644 --- a/Documentation/video4linux/CARDLIST.em28xx +++ b/Documentation/video4linux/CARDLIST.em28xx @@ -20,7 +20,7 @@ 19 -> EM2860/SAA711X Reference Design (em2860) 20 -> AMD ATI TV Wonder HD 600 (em2880) [0438:b002] 21 -> eMPIA Technology, Inc. GrabBeeX+ Video Encoder (em2800) [eb1a:2801] - 22 -> Unknown EM2750/EM2751 webcam grabber (em2750) [eb1a:2750,eb1a:2751] + 22 -> EM2710/EM2750/EM2751 webcam grabber (em2750) [eb1a:2750,eb1a:2751] 23 -> Huaqi DLCW-130 (em2750) 24 -> D-Link DUB-T210 TV Tuner (em2820/em2840) [2001:f112] 25 -> Gadmei UTV310 (em2820/em2840) diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt index 2bcf788..573f95b 100644 --- a/Documentation/video4linux/gspca.txt +++ b/Documentation/video4linux/gspca.txt @@ -44,7 +44,9 @@ zc3xx 0458:7007 Genius VideoCam V2 zc3xx 0458:700c Genius VideoCam V3 zc3xx 0458:700f Genius VideoCam Web V2 sonixj 0458:7025 Genius Eye 311Q +sn9c20x 0458:7029 Genius Look 320s sonixj 0458:702e Genius Slim 310 NB +sn9c20x 045e:00f4 LifeCam VX-6000 (SN9C20x + OV9650) sonixj 045e:00f5 MicroSoft VX3000 sonixj 045e:00f7 MicroSoft VX1000 ov519 045e:028c Micro$oft xbox cam @@ -282,6 +284,28 @@ sonixj 0c45:613a Microdia Sonix PC Camera sonixj 0c45:613b Surfer SN-206 sonixj 0c45:613c Sonix Pccam168 sonixj 0c45:6143 Sonix Pccam168 +sn9c20x 0c45:6240 PC Camera (SN9C201 + MT9M001) +sn9c20x 0c45:6242 PC Camera (SN9C201 + MT9M111) +sn9c20x 0c45:6248 PC Camera (SN9C201 + OV9655) +sn9c20x 0c45:624e PC Camera (SN9C201 + SOI968) +sn9c20x 0c45:624f PC Camera (SN9C201 + OV9650) +sn9c20x 0c45:6251 PC Camera (SN9C201 + OV9650) +sn9c20x 0c45:6253 PC Camera (SN9C201 + OV9650) +sn9c20x 0c45:6260 PC Camera (SN9C201 + OV7670) +sn9c20x 0c45:6270 PC Camera (SN9C201 + MT9V011/MT9V111/MT9V112) +sn9c20x 0c45:627b PC Camera (SN9C201 + OV7660) +sn9c20x 0c45:627c PC Camera (SN9C201 + HV7131R) +sn9c20x 0c45:627f PC Camera (SN9C201 + OV9650) +sn9c20x 0c45:6280 PC Camera (SN9C202 + MT9M001) +sn9c20x 0c45:6282 PC Camera (SN9C202 + MT9M111) +sn9c20x 0c45:6288 PC Camera (SN9C202 + OV9655) +sn9c20x 0c45:628e PC Camera (SN9C202 + SOI968) +sn9c20x 0c45:628f PC Camera (SN9C202 + OV9650) +sn9c20x 0c45:62a0 PC Camera (SN9C202 + OV7670) +sn9c20x 0c45:62b0 PC Camera (SN9C202 + MT9V011/MT9V111/MT9V112) +sn9c20x 0c45:62b3 PC Camera (SN9C202 + OV9655) +sn9c20x 0c45:62bb PC Camera (SN9C202 + OV7660) +sn9c20x 0c45:62bc PC Camera (SN9C202 + HV7131R) sunplus 0d64:0303 Sunplus FashionCam DXG etoms 102c:6151 Qcam Sangha CIF etoms 102c:6251 
Qcam xxxxxx VGA @@ -290,6 +314,7 @@ spca561 10fd:7e50 FlyCam Usb 100 zc3xx 10fd:8050 Typhoon Webshot II USB 300k ov534 1415:2000 Sony HD Eye for PS3 (SLEH 00201) pac207 145f:013a Trust WB-1300N +sn9c20x 145f:013d Trust WB-3600R vc032x 15b8:6001 HP 2.0 Megapixel vc032x 15b8:6002 HP 2.0 Megapixel rz406aa spca501 1776:501c Arowana 300K CMOS Camera @@ -300,4 +325,11 @@ spca500 2899:012c Toptro Industrial spca508 8086:0110 Intel Easy PC Camera spca500 8086:0630 Intel Pocket PC Camera spca506 99fa:8988 Grandtec V.cap +sn9c20x a168:0610 Dino-Lite Digital Microscope (SN9C201 + HV7131R) +sn9c20x a168:0611 Dino-Lite Digital Microscope (SN9C201 + HV7131R) +sn9c20x a168:0613 Dino-Lite Digital Microscope (SN9C201 + HV7131R) +sn9c20x a168:0618 Dino-Lite Digital Microscope (SN9C201 + HV7131R) +sn9c20x a168:0614 Dino-Lite Digital Microscope (SN9C201 + MT9M111) +sn9c20x a168:0615 Dino-Lite Digital Microscope (SN9C201 + MT9M111) +sn9c20x a168:0617 Dino-Lite Digital Microscope (SN9C201 + MT9M111) spca561 abcd:cdee Petcam diff --git a/MAINTAINERS b/MAINTAINERS index ebc2691..93d5e1f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2623,6 +2623,15 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git S: Maintained F: drivers/media/video/gspca/ +HARDWARE LATENCY DETECTOR +P: Jon Masters +M: jcm@jonmasters.org +W: http://www.kernel.org/pub/linux/kernel/people/jcm/hwlat_detector/ +S: Supported +L: linux-kernel@vger.kernel.org +F: Documentation/hwlat_detector.txt +F: drivers/misc/hwlat_detector.c + HARDWARE MONITORING L: lm-sensors@lm-sensors.org W: http://www.lm-sensors.org/ diff --git a/Makefile b/Makefile index 063d738..7602ca6 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 31 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc4-rt1 NAME = Man-Eating Seals of Antiquity # *DOCUMENTATION* diff --git a/arch/Kconfig b/arch/Kconfig index 99193b1..1b28306 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -33,6 +33,11 @@ config OPROFILE_IBS config HAVE_OPROFILE bool +config PROFILE_NMI + bool + depends on OPROFILE + default y + config KPROBES bool "Kprobes" depends on KALLSYMS && MODULES diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h index 1570c0b..55f4f13 100644 --- a/arch/alpha/include/asm/rwsem.h +++ b/arch/alpha/include/asm/rwsem.h @@ -18,15 +18,18 @@ struct rwsem_waiter; -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_down_read_failed(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_down_write_failed(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *); +extern struct rw_anon_semaphore * +rwsem_downgrade_wake(struct rw_anon_semaphore *sem); /* * the semaphore definition */ -struct rw_semaphore { +struct rw_anon_semaphore { long count; #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L #define RWSEM_ACTIVE_BIAS 0x0000000000000001L @@ -38,6 +41,31 @@ struct rw_semaphore { struct list_head wait_list; }; +#define __RWSEM_ANON_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ + LIST_HEAD_INIT((name).wait_list) } + +#define DECLARE_ANON_RWSEM(name) \ + struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name) + +static inline void init_anon_rwsem(struct 
rw_anon_semaphore *sem) +{ + sem->count = RWSEM_UNLOCKED_VALUE; + spin_lock_init(&sem->wait_lock); + INIT_LIST_HEAD(&sem->wait_list); +} + +static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem) +{ + return (sem->count != 0); +} + +struct rw_semaphore { + long count; + spinlock_t wait_lock; + struct list_head wait_list; +}; + #define __RWSEM_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ LIST_HEAD_INIT((name).wait_list) } @@ -47,12 +75,15 @@ struct rw_semaphore { static inline void init_rwsem(struct rw_semaphore *sem) { - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->wait_lock); - INIT_LIST_HEAD(&sem->wait_list); + init_anon_rwsem((struct rw_anon_semaphore *)sem); } -static inline void __down_read(struct rw_semaphore *sem) +static inline int rwsem_is_locked(struct rw_semaphore *sem) +{ + return (sem->count != 0); +} + +static inline void __down_read(struct rw_anon_semaphore *sem) { long oldcount; #ifndef CONFIG_SMP @@ -79,7 +110,7 @@ static inline void __down_read(struct rw_semaphore *sem) /* * trylock for reading -- returns 1 if successful, 0 if contention */ -static inline int __down_read_trylock(struct rw_semaphore *sem) +static inline int __down_read_trylock(struct rw_anon_semaphore *sem) { long old, new, res; @@ -94,7 +125,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) return res >= 0 ? 1 : 0; } -static inline void __down_write(struct rw_semaphore *sem) +static inline void __down_write(struct rw_anon_semaphore *sem) { long oldcount; #ifndef CONFIG_SMP @@ -121,7 +152,7 @@ static inline void __down_write(struct rw_semaphore *sem) /* * trylock for writing -- returns 1 if successful, 0 if contention */ -static inline int __down_write_trylock(struct rw_semaphore *sem) +static inline int __down_write_trylock(struct rw_anon_semaphore *sem) { long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS); @@ -130,7 +161,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) return 0; } -static inline void __up_read(struct rw_semaphore *sem) +static inline void __up_read(struct rw_anon_semaphore *sem) { long oldcount; #ifndef CONFIG_SMP @@ -155,7 +186,7 @@ static inline void __up_read(struct rw_semaphore *sem) rwsem_wake(sem); } -static inline void __up_write(struct rw_semaphore *sem) +static inline void __up_write(struct rw_anon_semaphore *sem) { long count; #ifndef CONFIG_SMP @@ -184,7 +215,7 @@ static inline void __up_write(struct rw_semaphore *sem) /* * downgrade write lock to read lock */ -static inline void __downgrade_write(struct rw_semaphore *sem) +static inline void __downgrade_write(struct rw_anon_semaphore *sem) { long oldcount; #ifndef CONFIG_SMP @@ -208,7 +239,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) rwsem_downgrade_wake(sem); } -static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem) +static inline void rwsem_atomic_add(long val, struct rw_anon_semaphore *sem) { #ifndef CONFIG_SMP sem->count += val; @@ -227,7 +258,7 @@ static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem) #endif } -static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem) +static inline long rwsem_atomic_update(long val, struct rw_anon_semaphore *sem) { #ifndef CONFIG_SMP sem->count += val; @@ -250,10 +281,5 @@ static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem) #endif } -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->count != 0); -} - #endif /* __KERNEL__ */ #endif /* _ALPHA_RWSEM_H 
*/ diff --git a/arch/alpha/include/asm/tlb.h b/arch/alpha/include/asm/tlb.h index c136365..4286675 100644 --- a/arch/alpha/include/asm/tlb.h +++ b/arch/alpha/include/asm/tlb.h @@ -9,7 +9,7 @@ #include -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) -#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) +#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) +#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd) #endif diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index cc78346..2e0b75e 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -81,7 +81,7 @@ show_interrupts(struct seq_file *p, void *v) #endif if (irq < ACTUAL_NR_IRQS) { - spin_lock_irqsave(&irq_desc[irq].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[irq].lock, flags); action = irq_desc[irq].action; if (!action) goto unlock; @@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[irq].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } else if (irq == ACTUAL_NR_IRQS) { #ifdef CONFIG_SMP seq_puts(p, "IPI: "); diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index b04e2cb..991a967 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -106,7 +106,7 @@ irqreturn_t timer_interrupt(int irq, void *dev) profile_tick(CPU_PROFILING); #endif - write_seqlock(&xtime_lock); + write_atomic_seqlock(&xtime_lock); /* * Calculate how many ticks have passed since the last update, @@ -136,7 +136,7 @@ irqreturn_t timer_interrupt(int irq, void *dev) state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0); } - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); #ifndef CONFIG_SMP while (nticks--) @@ -416,14 +416,14 @@ do_gettimeofday(struct timeval *tv) unsigned long delta_cycles, delta_usec, partial_tick; do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); + seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags); delta_cycles = rpcc() - state.last_time; sec = xtime.tv_sec; usec = (xtime.tv_nsec / 1000); partial_tick = state.partial_tick; - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); + } while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags)); #ifdef CONFIG_SMP /* Until and unless we figure out how to get cpu cycle counters @@ -470,7 +470,7 @@ do_settimeofday(struct timespec *tv) if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; - write_seqlock_irq(&xtime_lock); + write_atomic_seqlock_irq(&xtime_lock); /* The offset that is added into time in do_gettimeofday above must be subtracted out here to keep a coherent view of the @@ -496,7 +496,7 @@ do_settimeofday(struct timespec *tv) ntp_clear(); - write_sequnlock_irq(&xtime_lock); + write_atomic_sequnlock_irq(&xtime_lock); clock_was_set(); return 0; } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index aef63c8..eac1a92 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -962,18 +962,7 @@ config LOCAL_TIMERS accounting to be spread across the timer interval, preventing a "thundering herd" at every timer tick. -config PREEMPT - bool "Preemptible Kernel (EXPERIMENTAL)" - depends on EXPERIMENTAL - help - This option reduces the latency of the kernel when reacting to - real-time or interactive events by allowing a low priority process to - be preempted even if it is in kernel mode executing a system call. - This allows applications to run more reliably even when the system is - under load. 
- - Say Y here if you are building a kernel for a desktop, embedded - or real-time system. Say N if you are unsure. +source kernel/Kconfig.preempt config HZ int diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 7edf353..c682ef4 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -31,18 +31,18 @@ #define DMA_MODE_CASCADE 0xc0 #define DMA_AUTOINIT 0x10 -extern spinlock_t dma_spin_lock; +extern atomic_spinlock_t dma_spin_lock; static inline unsigned long claim_dma_lock(void) { unsigned long flags; - spin_lock_irqsave(&dma_spin_lock, flags); + atomic_spin_lock_irqsave(&dma_spin_lock, flags); return flags; } static inline void release_dma_lock(unsigned long flags) { - spin_unlock_irqrestore(&dma_spin_lock, flags); + atomic_spin_unlock_irqrestore(&dma_spin_lock, flags); } /* Clear the 'DMA Pointer Flip Flop'. diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index d65b2f5..e849ed9 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -60,6 +60,8 @@ #include #include +#include + #define __exception __attribute__((section(".exception.text"))) struct thread_info; diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 321c83e..dd667f2 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -40,17 +40,12 @@ struct mmu_gather { unsigned long range_end; }; -DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); - -static inline struct mmu_gather * -tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) +static inline void +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, + unsigned int full_mm_flush) { - struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); - tlb->mm = mm; tlb->fullmm = full_mm_flush; - - return tlb; } static inline void @@ -61,8 +56,6 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) /* keep the page table cache within bounds */ check_pgt_cache(); - - put_cpu_var(mmu_gathers); } /* @@ -102,8 +95,8 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) } #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) -#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep) -#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp) +#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) +#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) #define tlb_migrate_finish(mm) do { } while (0) diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c index 7d5b9fb..7a6f3d2 100644 --- a/arch/arm/kernel/dma.c +++ b/arch/arm/kernel/dma.c @@ -21,7 +21,7 @@ #include -DEFINE_SPINLOCK(dma_spin_lock); +DEFINE_ATOMIC_SPINLOCK(dma_spin_lock); EXPORT_SYMBOL(dma_spin_lock); static dma_t *dma_chan[MAX_DMA_CHANNELS]; diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 366e509..0f529db 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -59,7 +59,8 @@ work_pending: b ret_slow_syscall @ Check work again work_resched: - bl schedule + bl __schedule + /* * "slow" syscall return path. "why" tells us if this was a real syscall. 
*/ diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index b7c3490..6ea2a03 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; @@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { #ifdef CONFIG_ARCH_ACORN show_fiq_list(p, v); @@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) } desc = irq_desc + irq; - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; if (iflags & IRQF_VALID) desc->status &= ~IRQ_NOREQUEST; @@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) desc->status &= ~IRQ_NOPROBE; if (!(iflags & IRQF_NOAUTOEN)) desc->status &= ~IRQ_NOAUTOEN; - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } void __init init_IRQ(void) @@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) { pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu); - spin_lock_irq(&desc->lock); + atomic_spin_lock_irq(&desc->lock); desc->chip->set_affinity(irq, cpumask_of(cpu)); - spin_unlock_irq(&desc->lock); + atomic_spin_unlock_irq(&desc->lock); } /* diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 39196df..fcecbb2 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -174,9 +174,11 @@ void cpu_idle(void) } leds_event(led_idle_end); tick_nohz_restart_sched_tick(); - preempt_enable_no_resched(); - schedule(); + local_irq_disable(); + __preempt_enable_no_resched(); + __schedule(); preempt_disable(); + local_irq_enable(); } } diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 93bb424..207fced 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -627,6 +627,14 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall) siginfo_t info; int signr; +#ifdef CONFIG_PREEMPT_RT + /* + * Fully-preemptible kernel does not need interrupts disabled: + */ + local_irq_enable(); + preempt_check_resched(); +#endif + /* * We want the common case to go fast, which * is why we may in certain cases get here from diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index de885fd..d825d4e 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -451,17 +451,17 @@ void __cpuinit percpu_timer_setup(void) local_timer_setup(evt); } -static DEFINE_SPINLOCK(stop_lock); +static DEFINE_ATOMIC_SPINLOCK(stop_lock); /* * ipi_cpu_stop - handle IPI from smp_send_stop() */ static void ipi_cpu_stop(unsigned int cpu) { - spin_lock(&stop_lock); + atomic_spin_lock(&stop_lock); printk(KERN_CRIT "CPU%u: stopping\n", cpu); dump_stack(); - spin_unlock(&stop_lock); + atomic_spin_unlock(&stop_lock); set_cpu_online(cpu, false); diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index 4cdc4a0..8a545e2 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -244,11 +244,11 @@ void do_gettimeofday(struct timeval *tv) unsigned long usec, sec; do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); + seq = 
read_atomic_seqbegin_irqsave(&xtime_lock, flags); usec = system_timer->offset(); sec = xtime.tv_sec; usec += xtime.tv_nsec / 1000; - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); + } while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags)); /* usec may have gone up a lot: be safe */ while (usec >= 1000000) { @@ -270,7 +270,7 @@ int do_settimeofday(struct timespec *tv) if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; - write_seqlock_irq(&xtime_lock); + write_atomic_seqlock_irq(&xtime_lock); /* * This is revolting. We need to set "xtime" correctly. However, the * value in this location is the value at the most recent update of @@ -286,7 +286,7 @@ int do_settimeofday(struct timespec *tv) set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); ntp_clear(); - write_sequnlock_irq(&xtime_lock); + write_atomic_sequnlock_irq(&xtime_lock); clock_was_set(); return 0; } @@ -336,9 +336,9 @@ void timer_tick(void) profile_tick(CPU_PROFILING); do_leds(); do_set_rtc(); - write_seqlock(&xtime_lock); + write_atomic_seqlock(&xtime_lock); do_timer(1); - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); #ifndef CONFIG_SMP update_process_times(user_mode(get_irq_regs())); #endif diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 57eb0f6..0cf0ae3 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -239,7 +239,7 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p } } -DEFINE_SPINLOCK(die_lock); +DEFINE_ATOMIC_SPINLOCK(die_lock); /* * This function is protected against re-entrancy. @@ -251,12 +251,12 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) oops_enter(); console_verbose(); - spin_lock_irq(&die_lock); + atomic_spin_lock_irq(&die_lock); bust_spinlocks(1); __die(str, err, thread, regs); bust_spinlocks(0); add_taint(TAINT_DIE); - spin_unlock_irq(&die_lock); + atomic_spin_unlock_irq(&die_lock); if (in_interrupt()) panic("Fatal exception in interrupt"); @@ -282,24 +282,24 @@ void arm_notify_die(const char *str, struct pt_regs *regs, } static LIST_HEAD(undef_hook); -static DEFINE_SPINLOCK(undef_lock); +static DEFINE_ATOMIC_SPINLOCK(undef_lock); void register_undef_hook(struct undef_hook *hook) { unsigned long flags; - spin_lock_irqsave(&undef_lock, flags); + atomic_spin_lock_irqsave(&undef_lock, flags); list_add(&hook->node, &undef_hook); - spin_unlock_irqrestore(&undef_lock, flags); + atomic_spin_unlock_irqrestore(&undef_lock, flags); } void unregister_undef_hook(struct undef_hook *hook) { unsigned long flags; - spin_lock_irqsave(&undef_lock, flags); + atomic_spin_lock_irqsave(&undef_lock, flags); list_del(&hook->node); - spin_unlock_irqrestore(&undef_lock, flags); + atomic_spin_unlock_irqrestore(&undef_lock, flags); } static int call_undef_hook(struct pt_regs *regs, unsigned int instr) @@ -308,12 +308,12 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr) unsigned long flags; int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL; - spin_lock_irqsave(&undef_lock, flags); + atomic_spin_lock_irqsave(&undef_lock, flags); list_for_each_entry(hook, &undef_hook, node) if ((instr & hook->instr_mask) == hook->instr_val && (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) fn = hook->fn; - spin_unlock_irqrestore(&undef_lock, flags); + atomic_spin_unlock_irqrestore(&undef_lock, flags); return fn ? 
fn(regs, instr) : 1; } diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c index f2236f0..cf609f8 100644 --- a/arch/arm/mach-at91/gpio.c +++ b/arch/arm/mach-at91/gpio.c @@ -375,12 +375,18 @@ static int gpio_irq_type(unsigned pin, unsigned type) } } +static void gpio_irq_ack_noop(unsigned int irq) +{ + /* Dummy function. */ +} + static struct irq_chip gpio_irqchip = { .name = "GPIO", .mask = gpio_irq_mask, .unmask = gpio_irq_unmask, .set_type = gpio_irq_type, .set_wake = gpio_irq_set_wake, + .ack = gpio_irq_ack_noop, }; static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) @@ -527,7 +533,7 @@ void __init at91_gpio_irq_setup(void) * shorter, and the AIC handles interrupts sanely. */ set_irq_chip(pin, &gpio_irqchip); - set_irq_handler(pin, handle_simple_irq); + set_irq_handler(pin, handle_edge_irq); set_irq_flags(pin, IRQF_VALID); } diff --git a/arch/arm/mach-footbridge/include/mach/hardware.h b/arch/arm/mach-footbridge/include/mach/hardware.h index 51dd902..2276ebf 100644 --- a/arch/arm/mach-footbridge/include/mach/hardware.h +++ b/arch/arm/mach-footbridge/include/mach/hardware.h @@ -86,7 +86,7 @@ #define CPLD_FLASH_WR_ENABLE 1 #ifndef __ASSEMBLY__ -extern spinlock_t nw_gpio_lock; +extern atomic_spinlock_t nw_gpio_lock; extern void nw_gpio_modify_op(unsigned int mask, unsigned int set); extern void nw_gpio_modify_io(unsigned int mask, unsigned int in); extern unsigned int nw_gpio_read(void); diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c index ac7ffa6..574a57e 100644 --- a/arch/arm/mach-footbridge/netwinder-hw.c +++ b/arch/arm/mach-footbridge/netwinder-hw.c @@ -68,7 +68,7 @@ static inline void wb977_ww(int reg, int val) /* * This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE */ -DEFINE_SPINLOCK(nw_gpio_lock); +DEFINE_ATOMIC_SPINLOCK(nw_gpio_lock); EXPORT_SYMBOL(nw_gpio_lock); static unsigned int current_gpio_op; @@ -327,9 +327,9 @@ static inline void wb977_init_gpio(void) /* * Set Group1/Group2 outputs */ - spin_lock_irqsave(&nw_gpio_lock, flags); + atomic_spin_lock_irqsave(&nw_gpio_lock, flags); nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN); - spin_unlock_irqrestore(&nw_gpio_lock, flags); + atomic_spin_unlock_irqrestore(&nw_gpio_lock, flags); } /* @@ -390,9 +390,9 @@ static void __init cpld_init(void) { unsigned long flags; - spin_lock_irqsave(&nw_gpio_lock, flags); + atomic_spin_lock_irqsave(&nw_gpio_lock, flags); nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE); - spin_unlock_irqrestore(&nw_gpio_lock, flags); + atomic_spin_unlock_irqrestore(&nw_gpio_lock, flags); } static unsigned char rwa_unlock[] __initdata = @@ -616,9 +616,9 @@ static int __init nw_hw_init(void) cpld_init(); rwa010_init(); - spin_lock_irqsave(&nw_gpio_lock, flags); + atomic_spin_lock_irqsave(&nw_gpio_lock, flags); nw_gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS); - spin_unlock_irqrestore(&nw_gpio_lock, flags); + atomic_spin_unlock_irqrestore(&nw_gpio_lock, flags); } return 0; } diff --git a/arch/arm/mach-footbridge/netwinder-leds.c b/arch/arm/mach-footbridge/netwinder-leds.c index 00269fe..642a443 100644 --- a/arch/arm/mach-footbridge/netwinder-leds.c +++ b/arch/arm/mach-footbridge/netwinder-leds.c @@ -31,13 +31,13 @@ static char led_state; static char hw_led_state; -static DEFINE_SPINLOCK(leds_lock); +static DEFINE_ATOMIC_SPINLOCK(leds_lock); static void netwinder_leds_event(led_event_t evt) { unsigned long flags; - spin_lock_irqsave(&leds_lock, flags); + atomic_spin_lock_irqsave(&leds_lock, flags); switch (evt) { 
case led_start: @@ -117,12 +117,12 @@ static void netwinder_leds_event(led_event_t evt) break; } - spin_unlock_irqrestore(&leds_lock, flags); + atomic_spin_unlock_irqrestore(&leds_lock, flags); if (led_state & LED_STATE_ENABLED) { - spin_lock_irqsave(&nw_gpio_lock, flags); + atomic_spin_lock_irqsave(&nw_gpio_lock, flags); nw_gpio_modify_op(GPIO_RED_LED | GPIO_GREEN_LED, hw_led_state); - spin_unlock_irqrestore(&nw_gpio_lock, flags); + atomic_spin_unlock_irqrestore(&nw_gpio_lock, flags); } } diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index a0f60e5..523b160 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c @@ -199,7 +199,7 @@ static struct amba_pl010_data integrator_uart_data = { #define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_BASE) + INTEGRATOR_HDR_CTRL_OFFSET -static DEFINE_SPINLOCK(cm_lock); +static DEFINE_ATOMIC_SPINLOCK(cm_lock); /** * cm_control - update the CM_CTRL register. @@ -211,10 +211,10 @@ void cm_control(u32 mask, u32 set) unsigned long flags; u32 val; - spin_lock_irqsave(&cm_lock, flags); + atomic_spin_lock_irqsave(&cm_lock, flags); val = readl(CM_CTRL) & ~mask; writel(val | set, CM_CTRL); - spin_unlock_irqrestore(&cm_lock, flags); + atomic_spin_unlock_irqrestore(&cm_lock, flags); } EXPORT_SYMBOL(cm_control); diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c index f1d72b2..50dcb35 100644 --- a/arch/arm/mach-integrator/pci_v3.c +++ b/arch/arm/mach-integrator/pci_v3.c @@ -162,7 +162,7 @@ * 7:2 register number * */ -static DEFINE_SPINLOCK(v3_lock); +static DEFINE_ATOMIC_SPINLOCK(v3_lock); #define PCI_BUS_NONMEM_START 0x00000000 #define PCI_BUS_NONMEM_SIZE SZ_256M @@ -283,7 +283,7 @@ static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where, unsigned long flags; u32 v; - spin_lock_irqsave(&v3_lock, flags); + atomic_spin_lock_irqsave(&v3_lock, flags); addr = v3_open_config_window(bus, devfn, where); switch (size) { @@ -301,7 +301,7 @@ static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where, } v3_close_config_window(); - spin_unlock_irqrestore(&v3_lock, flags); + atomic_spin_unlock_irqrestore(&v3_lock, flags); *val = v; return PCIBIOS_SUCCESSFUL; @@ -313,7 +313,7 @@ static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where, unsigned long addr; unsigned long flags; - spin_lock_irqsave(&v3_lock, flags); + atomic_spin_lock_irqsave(&v3_lock, flags); addr = v3_open_config_window(bus, devfn, where); switch (size) { @@ -334,7 +334,7 @@ static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where, } v3_close_config_window(); - spin_unlock_irqrestore(&v3_lock, flags); + atomic_spin_unlock_irqrestore(&v3_lock, flags); return PCIBIOS_SUCCESSFUL; } @@ -509,7 +509,7 @@ void __init pci_v3_preinit(void) hook_fault_code(8, v3_pci_fault, SIGBUS, "external abort on non-linefetch"); hook_fault_code(10, v3_pci_fault, SIGBUS, "external abort on non-linefetch"); - spin_lock_irqsave(&v3_lock, flags); + atomic_spin_lock_irqsave(&v3_lock, flags); /* * Unlock V3 registers, but only if they were previously locked. 
@@ -582,7 +582,7 @@ void __init pci_v3_preinit(void) printk(KERN_ERR "PCI: unable to grab PCI error " "interrupt: %d\n", ret); - spin_unlock_irqrestore(&v3_lock, flags); + atomic_spin_unlock_irqrestore(&v3_lock, flags); } void __init pci_v3_postinit(void) diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index 70afcfe..ef50a20 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -54,7 +54,7 @@ unsigned long ixp4xx_pci_reg_base = 0; * these transactions are atomic or we will end up * with corrupt data on the bus or in a driver. */ -static DEFINE_SPINLOCK(ixp4xx_pci_lock); +static DEFINE_ATOMIC_SPINLOCK(ixp4xx_pci_lock); /* * Read from PCI config space @@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(ixp4xx_pci_lock); static void crp_read(u32 ad_cbe, u32 *data) { unsigned long flags; - spin_lock_irqsave(&ixp4xx_pci_lock, flags); + atomic_spin_lock_irqsave(&ixp4xx_pci_lock, flags); *PCI_CRP_AD_CBE = ad_cbe; *data = *PCI_CRP_RDATA; - spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); + atomic_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); } /* @@ -74,10 +74,10 @@ static void crp_read(u32 ad_cbe, u32 *data) static void crp_write(u32 ad_cbe, u32 data) { unsigned long flags; - spin_lock_irqsave(&ixp4xx_pci_lock, flags); + atomic_spin_lock_irqsave(&ixp4xx_pci_lock, flags); *PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe; *PCI_CRP_WDATA = data; - spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); + atomic_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); } static inline int check_master_abort(void) @@ -101,7 +101,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32 cmd, u32* data) int retval = 0; int i; - spin_lock_irqsave(&ixp4xx_pci_lock, flags); + atomic_spin_lock_irqsave(&ixp4xx_pci_lock, flags); *PCI_NP_AD = addr; @@ -118,7 +118,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32 cmd, u32* data) if(check_master_abort()) retval = 1; - spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); + atomic_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); return retval; } @@ -127,7 +127,7 @@ int ixp4xx_pci_read_no_errata(u32 addr, u32 cmd, u32* data) unsigned long flags; int retval = 0; - spin_lock_irqsave(&ixp4xx_pci_lock, flags); + atomic_spin_lock_irqsave(&ixp4xx_pci_lock, flags); *PCI_NP_AD = addr; @@ -140,7 +140,7 @@ int ixp4xx_pci_read_no_errata(u32 addr, u32 cmd, u32* data) if(check_master_abort()) retval = 1; - spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); + atomic_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); return retval; } @@ -149,7 +149,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data) unsigned long flags; int retval = 0; - spin_lock_irqsave(&ixp4xx_pci_lock, flags); + atomic_spin_lock_irqsave(&ixp4xx_pci_lock, flags); *PCI_NP_AD = addr; @@ -162,7 +162,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data) if(check_master_abort()) retval = 1; - spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); + atomic_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); return retval; } diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c index feb0e54..6873b7b 100644 --- a/arch/arm/mach-ns9xxx/irq.c +++ b/arch/arm/mach-ns9xxx/irq.c @@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) struct irqaction *action; irqreturn_t action_ret; - spin_lock(&desc->lock); + atomic_spin_lock(&desc->lock); BUG_ON(desc->status & IRQ_INPROGRESS); @@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) goto out_mask; desc->status |= IRQ_INPROGRESS; - spin_unlock(&desc->lock); + 
atomic_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); @@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) * Maybe this function should go to kernel/irq/chip.c? */ note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + atomic_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; if (desc->status & IRQ_DISABLED) @@ -97,7 +97,7 @@ out_mask: /* ack unconditionally to unmask lower prio irqs */ desc->chip->ack(irq); - spin_unlock(&desc->lock); + atomic_spin_unlock(&desc->lock); } #define handle_irq handle_prio_irq #endif diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c index ab5883b..0f0d555 100644 --- a/arch/arm/mach-sa1100/badge4.c +++ b/arch/arm/mach-sa1100/badge4.c @@ -240,15 +240,22 @@ void badge4_set_5V(unsigned subsystem, int on) /* detect on->off and off->on transitions */ if ((!old_5V_bitmap) && (badge4_5V_bitmap)) { /* was off, now on */ - printk(KERN_INFO "%s: enabling 5V supply rail\n", __func__); GPSR = BADGE4_GPIO_PCMEN5V; } else if ((old_5V_bitmap) && (!badge4_5V_bitmap)) { /* was on, now off */ - printk(KERN_INFO "%s: disabling 5V supply rail\n", __func__); GPCR = BADGE4_GPIO_PCMEN5V; } local_irq_restore(flags); + + /* detect on->off and off->on transitions */ + if ((!old_5V_bitmap) && (badge4_5V_bitmap)) { + /* was off, now on */ + printk(KERN_INFO "%s: enabling 5V supply rail\n", __FUNCTION__); + } else if ((old_5V_bitmap) && (!badge4_5V_bitmap)) { + /* was on, now off */ + printk(KERN_INFO "%s: disabling 5V supply rail\n", __FUNCTION__); + } } EXPORT_SYMBOL(badge4_set_5V); diff --git a/arch/arm/mach-shark/leds.c b/arch/arm/mach-shark/leds.c index c9e32de..6ae3314 100644 --- a/arch/arm/mach-shark/leds.c +++ b/arch/arm/mach-shark/leds.c @@ -36,7 +36,7 @@ static char led_state; static short hw_led_state; static short saved_state; -static DEFINE_SPINLOCK(leds_lock); +static DEFINE_ATOMIC_SPINLOCK(leds_lock); short sequoia_read(int addr) { outw(addr,0x24); @@ -52,7 +52,7 @@ static void sequoia_leds_event(led_event_t evt) { unsigned long flags; - spin_lock_irqsave(&leds_lock, flags); + atomic_spin_lock_irqsave(&leds_lock, flags); hw_led_state = sequoia_read(0x09); @@ -144,7 +144,7 @@ static void sequoia_leds_event(led_event_t evt) if (led_state & LED_STATE_ENABLED) sequoia_write(hw_led_state,0x09); - spin_unlock_irqrestore(&leds_lock, flags); + atomic_spin_unlock_irqrestore(&leds_lock, flags); } static int __init leds_init(void) diff --git a/arch/arm/mach-w90x900/mfp-w90p910.c b/arch/arm/mach-w90x900/mfp-w90p910.c index a3520fe..92adadf 100644 --- a/arch/arm/mach-w90x900/mfp-w90p910.c +++ b/arch/arm/mach-w90x900/mfp-w90p910.c @@ -34,7 +34,7 @@ #define GPSELEI0 (0x01 << 26) #define GPSELEI1 (0x01 << 27) -static DECLARE_MUTEX(mfp_sem); +static DEFINE_MUTEX(mfp_sem); void mfp_set_groupf(struct device *dev) { @@ -43,7 +43,7 @@ void mfp_set_groupf(struct device *dev) BUG_ON(!dev); - down(&mfp_sem); + mutex_lock(&mfp_sem); dev_id = dev_name(dev); @@ -56,7 +56,7 @@ void mfp_set_groupf(struct device *dev) __raw_writel(mfpen, REG_MFSEL); - up(&mfp_sem); + mutex_unlock(&mfp_sem); } EXPORT_SYMBOL(mfp_set_groupf); @@ -67,7 +67,7 @@ void mfp_set_groupc(struct device *dev) BUG_ON(!dev); - down(&mfp_sem); + mutex_lock(&mfp_sem); dev_id = dev_name(dev); @@ -86,7 +86,7 @@ void mfp_set_groupc(struct device *dev) __raw_writel(mfpen, REG_MFSEL); - up(&mfp_sem); + mutex_unlock(&mfp_sem); } EXPORT_SYMBOL(mfp_set_groupc); @@ -97,7 +97,7 @@ void mfp_set_groupi(struct device *dev, int gpio) BUG_ON(!dev); - 
down(&mfp_sem); + mutex_lock(&mfp_sem); dev_id = dev_name(dev); @@ -110,7 +110,7 @@ void mfp_set_groupi(struct device *dev, int gpio) __raw_writel(mfpen, REG_MFSEL); - up(&mfp_sem); + mutex_unlock(&mfp_sem); } EXPORT_SYMBOL(mfp_set_groupi); diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index b480f1d..ade5628 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -26,19 +26,19 @@ #define CACHE_LINE_SIZE 32 static void __iomem *l2x0_base; -static DEFINE_SPINLOCK(l2x0_lock); +static DEFINE_ATOMIC_SPINLOCK(l2x0_lock); static inline void sync_writel(unsigned long val, unsigned long reg, unsigned long complete_mask) { unsigned long flags; - spin_lock_irqsave(&l2x0_lock, flags); + atomic_spin_lock_irqsave(&l2x0_lock, flags); writel(val, l2x0_base + reg); /* wait for the operation to complete */ while (readl(l2x0_base + reg) & complete_mask) ; - spin_unlock_irqrestore(&l2x0_lock, flags); + atomic_spin_unlock_irqrestore(&l2x0_lock, flags); } static inline void cache_sync(void) diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index fc84fcc..072b7e9 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -14,7 +14,7 @@ #include #include -static DEFINE_SPINLOCK(cpu_asid_lock); +static DEFINE_ATOMIC_SPINLOCK(cpu_asid_lock); unsigned int cpu_last_asid = ASID_FIRST_VERSION; /* @@ -32,7 +32,7 @@ void __new_context(struct mm_struct *mm) { unsigned int asid; - spin_lock(&cpu_asid_lock); + atomic_spin_lock(&cpu_asid_lock); asid = ++cpu_last_asid; if (asid == 0) asid = cpu_last_asid = ASID_FIRST_VERSION; @@ -57,7 +57,7 @@ void __new_context(struct mm_struct *mm) dsb(); } } - spin_unlock(&cpu_asid_lock); + atomic_spin_unlock(&cpu_asid_lock); mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id()); mm->context.id = asid; diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 7370a71..8b77925 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -30,7 +30,7 @@ #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ L_PTE_MT_MINICACHE) -static DEFINE_SPINLOCK(minicache_lock); +static DEFINE_ATOMIC_SPINLOCK(minicache_lock); /* * ARMv4 mini-dcache optimised copy_user_highpage @@ -76,14 +76,14 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from, if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) __flush_dcache_page(page_mapping(from), from); - spin_lock(&minicache_lock); + atomic_spin_lock(&minicache_lock); set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); flush_tlb_kernel_page(0xffff8000); mc_copy_user_page((void *)0xffff8000, kto); - spin_unlock(&minicache_lock); + atomic_spin_unlock(&minicache_lock); kunmap_atomic(kto, KM_USER1); } diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 4127a7b..7c81541 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -27,7 +27,7 @@ #define from_address (0xffff8000) #define to_address (0xffffc000) -static DEFINE_SPINLOCK(v6_lock); +static DEFINE_ATOMIC_SPINLOCK(v6_lock); /* * Copy the user page. No aliasing to deal with so we can just @@ -88,7 +88,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to, * Now copy the page using the same cache colour as the * pages ultimate destination. 
*/ - spin_lock(&v6_lock); + atomic_spin_lock(&v6_lock); set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0); set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0); @@ -101,7 +101,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to, copy_page((void *)kto, (void *)kfrom); - spin_unlock(&v6_lock); + atomic_spin_unlock(&v6_lock); } /* @@ -121,13 +121,13 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad * Now clear the page using the same cache colour as * the pages ultimate destination. */ - spin_lock(&v6_lock); + atomic_spin_lock(&v6_lock); set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0); flush_tlb_kernel_page(to); clear_page((void *)to); - spin_unlock(&v6_lock); + atomic_spin_unlock(&v6_lock); } struct cpu_user_fns v6_user_fns __initdata = { diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 76824d3..4320cbe 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -32,7 +32,7 @@ #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ L_PTE_MT_MINICACHE) -static DEFINE_SPINLOCK(minicache_lock); +static DEFINE_ATOMIC_SPINLOCK(minicache_lock); /* * XScale mini-dcache optimised copy_user_highpage @@ -98,14 +98,14 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) __flush_dcache_page(page_mapping(from), from); - spin_lock(&minicache_lock); + atomic_spin_lock(&minicache_lock); set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); flush_tlb_kernel_page(COPYPAGE_MINICACHE); mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); - spin_unlock(&minicache_lock); + atomic_spin_unlock(&minicache_lock); kunmap_atomic(kto, KM_USER1); } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 510c179..1576176 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -41,7 +41,7 @@ * These are the page tables (2MB each) covering uncached, DMA consistent allocations */ static pte_t *consistent_pte[NUM_CONSISTENT_PTES]; -static DEFINE_SPINLOCK(consistent_lock); +static DEFINE_ATOMIC_SPINLOCK(consistent_lock); /* * VM region handling support. 
@@ -97,7 +97,7 @@ arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp) if (!new) goto out; - spin_lock_irqsave(&consistent_lock, flags); + atomic_spin_lock_irqsave(&consistent_lock, flags); list_for_each_entry(c, &head->vm_list, vm_list) { if ((addr + size) < addr) @@ -118,11 +118,11 @@ arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp) new->vm_end = addr + size; new->vm_active = 1; - spin_unlock_irqrestore(&consistent_lock, flags); + atomic_spin_unlock_irqrestore(&consistent_lock, flags); return new; nospc: - spin_unlock_irqrestore(&consistent_lock, flags); + atomic_spin_unlock_irqrestore(&consistent_lock, flags); kfree(new); out: return NULL; @@ -317,9 +317,9 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma, user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; - spin_lock_irqsave(&consistent_lock, flags); + atomic_spin_lock_irqsave(&consistent_lock, flags); c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr); - spin_unlock_irqrestore(&consistent_lock, flags); + atomic_spin_unlock_irqrestore(&consistent_lock, flags); if (c) { unsigned long off = vma->vm_pgoff; @@ -378,13 +378,13 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr size = PAGE_ALIGN(size); - spin_lock_irqsave(&consistent_lock, flags); + atomic_spin_lock_irqsave(&consistent_lock, flags); c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr); if (!c) goto no_area; c->vm_active = 0; - spin_unlock_irqrestore(&consistent_lock, flags); + atomic_spin_unlock_irqrestore(&consistent_lock, flags); if ((c->vm_end - c->vm_start) != size) { printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", @@ -431,15 +431,15 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr flush_tlb_kernel_range(c->vm_start, c->vm_end); - spin_lock_irqsave(&consistent_lock, flags); + atomic_spin_lock_irqsave(&consistent_lock, flags); list_del(&c->vm_list); - spin_unlock_irqrestore(&consistent_lock, flags); + atomic_spin_unlock_irqrestore(&consistent_lock, flags); kfree(c); return; no_area: - spin_unlock_irqrestore(&consistent_lock, flags); + atomic_spin_unlock_irqrestore(&consistent_lock, flags); printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", __func__, cpu_addr); dump_stack(); diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 6fdcbb7..02a07d8 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -258,7 +258,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto no_context; /* diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4722582..8f1e182 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -29,8 +29,6 @@ #include "mm.h" -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); - /* * empty_zero_page is a special page that is used for * zero-initialized data and COW. 
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index 3fcd752..695f8e2 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c @@ -48,9 +48,9 @@ static int op_arm_setup(void) { int ret; - spin_lock(&oprofilefs_lock); + atomic_spin_lock(&oprofilefs_lock); ret = op_arm_model->setup_ctrs(); - spin_unlock(&oprofilefs_lock); + atomic_spin_unlock(&oprofilefs_lock); return ret; } diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c index 4ce0f98..55764a0 100644 --- a/arch/arm/oprofile/op_model_mpcore.c +++ b/arch/arm/oprofile/op_model_mpcore.c @@ -263,10 +263,10 @@ static void em_route_irq(int irq, unsigned int cpu) struct irq_desc *desc = irq_desc + irq; const struct cpumask *mask = cpumask_of(cpu); - spin_lock_irq(&desc->lock); + atomic_spin_lock_irq(&desc->lock); cpumask_copy(desc->affinity, mask); desc->chip->set_affinity(irq, mask); - spin_unlock_irq(&desc->lock); + atomic_spin_unlock_irq(&desc->lock); } static int em_setup(void) diff --git a/arch/arm/oprofile/op_model_xscale.c b/arch/arm/oprofile/op_model_xscale.c index 724ab9c..cbe91ee 100644 --- a/arch/arm/oprofile/op_model_xscale.c +++ b/arch/arm/oprofile/op_model_xscale.c @@ -381,8 +381,9 @@ static int xscale_pmu_start(void) { int ret; u32 pmnc = read_pmnc(); + unsigned long irq_flags = IRQF_DISABLED | IRQF_NODELAY; - ret = request_irq(XSCALE_PMU_IRQ, xscale_pmu_interrupt, IRQF_DISABLED, + ret = request_irq(XSCALE_PMU_IRQ, xscale_pmu_interrupt, irq_flags, "XScale PMU", (void *)results); if (ret < 0) { diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c index e8c327a..a4447ce 100644 --- a/arch/arm/plat-omap/clock.c +++ b/arch/arm/plat-omap/clock.c @@ -108,15 +108,12 @@ EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { - unsigned long flags; unsigned long ret = 0; if (clk == NULL || IS_ERR(clk)) return 0; - spin_lock_irqsave(&clockfw_lock, flags); ret = clk->rate; - spin_unlock_irqrestore(&clockfw_lock, flags); return ret; } diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h index 6408213..92ecd84 100644 --- a/arch/avr32/include/asm/pgalloc.h +++ b/arch/avr32/include/asm/pgalloc.h @@ -83,7 +83,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) quicklist_free_page(QUICK_PT, NULL, pte); } -#define __pte_free_tlb(tlb,pte) \ +#define __pte_free_tlb(tlb,pte,addr) \ do { \ pgtable_page_dtor(pte); \ tlb_remove_page((tlb), pte); \ diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c index 9f57222..778f6ef 100644 --- a/arch/avr32/kernel/irq.c +++ b/arch/avr32/kernel/irq.c @@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; @@ -66,7 +66,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c index 4b5fd36..d82bcb0 100644 --- a/arch/blackfin/kernel/irqchip.c +++ b/arch/blackfin/kernel/irqchip.c @@ -46,7 +46,7 @@ void ack_bad_irq(unsigned int irq) static struct irq_desc bad_irq_desc = { .handle_irq = handle_bad_irq, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), + .lock = __ATOMIC_SPIN_LOCK_UNLOCKED(irq_desc->lock), }; #ifdef CONFIG_CPUMASK_OFFSTACK @@ -62,7 +62,7 @@ int 
show_interrupts(struct seq_file *p, void *v) unsigned long flags; if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c index adb54aa..96b676a 100644 --- a/arch/blackfin/kernel/time.c +++ b/arch/blackfin/kernel/time.c @@ -128,7 +128,7 @@ irqreturn_t timer_interrupt(int irq, void *dummy) /* last time the cmos clock got updated */ static long last_rtc_update; - write_seqlock(&xtime_lock); + write_atomic_seqlock(&xtime_lock); do_timer(1); /* @@ -148,7 +148,7 @@ irqreturn_t timer_interrupt(int irq, void *dummy) /* Do it again in 60s. */ last_rtc_update = xtime.tv_sec - 600; } - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); #ifdef CONFIG_IPIPE update_root_process_times(get_irq_regs()); @@ -192,12 +192,12 @@ void do_gettimeofday(struct timeval *tv) unsigned long usec, sec; do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); + seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags); usec = gettimeoffset(); sec = xtime.tv_sec; usec += (xtime.tv_nsec / NSEC_PER_USEC); } - while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); + while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags)); while (usec >= USEC_PER_SEC) { usec -= USEC_PER_SEC; @@ -217,7 +217,7 @@ int do_settimeofday(struct timespec *tv) if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; - write_seqlock_irq(&xtime_lock); + write_atomic_seqlock_irq(&xtime_lock); /* * This is revolting. We need to set the xtime.tv_usec * correctly. However, the value in this location is @@ -235,7 +235,7 @@ int do_settimeofday(struct timespec *tv) ntp_clear(); - write_sequnlock_irq(&xtime_lock); + write_atomic_sequnlock_irq(&xtime_lock); clock_was_set(); return 0; diff --git a/arch/cris/include/asm/pgalloc.h b/arch/cris/include/asm/pgalloc.h index a1ba761..6da975d 100644 --- a/arch/cris/include/asm/pgalloc.h +++ b/arch/cris/include/asm/pgalloc.h @@ -47,7 +47,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) __free_page(pte); } -#define __pte_free_tlb(tlb,pte) \ +#define __pte_free_tlb(tlb,pte,address) \ do { \ pgtable_page_dtor(pte); \ tlb_remove_page((tlb), pte); \ diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c index 7f642fc..45a270e 100644 --- a/arch/cris/kernel/irq.c +++ b/arch/cris/kernel/irq.c @@ -57,7 +57,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c index 074fe7d..72408ed 100644 --- a/arch/cris/kernel/time.c +++ b/arch/cris/kernel/time.c @@ -87,7 +87,7 @@ int do_settimeofday(struct timespec *tv) if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; - write_seqlock_irq(&xtime_lock); + write_atomic_seqlock_irq(&xtime_lock); /* * This is revolting. 
We need to set "xtime" correctly. However, the * value in this location is the value at the most recent update of @@ -103,7 +103,7 @@ int do_settimeofday(struct timespec *tv) set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); ntp_clear(); - write_sequnlock_irq(&xtime_lock); + write_atomic_sequnlock_irq(&xtime_lock); clock_was_set(); return 0; } diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h index 68e4677..5cb8dff 100644 --- a/arch/frv/include/asm/highmem.h +++ b/arch/frv/include/asm/highmem.h @@ -116,6 +116,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type) { unsigned long paddr; + preempt_disable(); pagefault_disable(); debug_kmap_atomic(type); paddr = page_to_phys(page); @@ -173,6 +174,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type) BUG(); } pagefault_enable(); + preempt_enable(); } #endif /* !__ASSEMBLY__ */ diff --git a/arch/frv/include/asm/pgalloc.h b/arch/frv/include/asm/pgalloc.h index 971e6ad..416d19a 100644 --- a/arch/frv/include/asm/pgalloc.h +++ b/arch/frv/include/asm/pgalloc.h @@ -49,7 +49,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) __free_page(pte); } -#define __pte_free_tlb(tlb,pte) \ +#define __pte_free_tlb(tlb,pte,address) \ do { \ pgtable_page_dtor(pte); \ tlb_remove_page((tlb),(pte)); \ @@ -62,7 +62,7 @@ do { \ */ #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); }) #define pmd_free(mm, x) do { } while (0) -#define __pmd_free_tlb(tlb,x) do { } while (0) +#define __pmd_free_tlb(tlb,x,a) do { } while (0) #endif /* CONFIG_MMU */ diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h index 3323301..22c6069 100644 --- a/arch/frv/include/asm/pgtable.h +++ b/arch/frv/include/asm/pgtable.h @@ -225,7 +225,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) */ #define pud_alloc_one(mm, address) NULL #define pud_free(mm, x) do { } while (0) -#define __pud_free_tlb(tlb, x) do { } while (0) +#define __pud_free_tlb(tlb, x, address) do { } while (0) /* * The "pud_xxx()" functions here are trivial for a folded two-level diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c index af3e824..7b29f55 100644 --- a/arch/frv/kernel/irq.c +++ b/arch/frv/kernel/irq.c @@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (action) { seq_printf(p, "%3d: ", i); @@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); } - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count)); } diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c index fb0ce75..fced1e3 100644 --- a/arch/frv/kernel/time.c +++ b/arch/frv/kernel/time.c @@ -70,7 +70,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy) * the irq version of write_lock because as just said we have irq * locally disabled. 
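/*
 * Illustrative sketch, not taken from the patch: the kmap_atomic() hunks
 * here (frv, and mips further down) add the same bracket - preemption is
 * disabled before pagefaults are, and both are re-enabled in reverse order
 * in kunmap_atomic().  From a caller's point of view the contract is
 * unchanged; demo_copy_from_highpage() below is a hypothetical user.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void demo_copy_from_highpage(struct page *page, void *dst, size_t len)
{
	void *src;

	/*
	 * kmap_atomic() now performs preempt_disable() + pagefault_disable(),
	 * so no sleeping is allowed until the matching kunmap_atomic().
	 * len must not exceed PAGE_SIZE.
	 */
	src = kmap_atomic(page, KM_USER0);
	memcpy(dst, src, len);
	kunmap_atomic(src, KM_USER0);	/* pagefault_enable() + preempt_enable() */
}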
-arca */ - write_seqlock(&xtime_lock); + write_atomic_seqlock(&xtime_lock); do_timer(1); @@ -96,7 +96,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy) __set_LEDS(n); #endif /* CONFIG_HEARTBEAT */ - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); update_process_times(user_mode(get_irq_regs())); diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c index 74f8dd7..7dde350 100644 --- a/arch/h8300/kernel/irq.c +++ b/arch/h8300/kernel/irq.c @@ -191,7 +191,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_puts(p, " CPU0"); if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; @@ -205,7 +205,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, ", %s", action->name); seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c index 7f2d6cf..cb110a4 100644 --- a/arch/h8300/kernel/time.c +++ b/arch/h8300/kernel/time.c @@ -35,9 +35,9 @@ void h8300_timer_tick(void) { if (current->pid) profile_tick(CPU_PROFILING); - write_seqlock(&xtime_lock); + write_atomic_seqlock(&xtime_lock); do_timer(1); - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); update_process_times(user_mode(get_irq_regs())); } diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h index b9ac1a6..96a8d92 100644 --- a/arch/ia64/include/asm/pgalloc.h +++ b/arch/ia64/include/asm/pgalloc.h @@ -48,7 +48,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) { quicklist_free(0, NULL, pud); } -#define __pud_free_tlb(tlb, pud) pud_free((tlb)->mm, pud) +#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud) #endif /* CONFIG_PGTABLE_4 */ static inline void @@ -67,7 +67,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) quicklist_free(0, NULL, pmd); } -#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) +#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd) static inline void pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte) @@ -117,6 +117,6 @@ static inline void check_pgt_cache(void) quicklist_trim(0, NULL, 25, 16); } -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) +#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) #endif /* _ASM_IA64_PGALLOC_H */ diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h index fbee74b..55d006d 100644 --- a/arch/ia64/include/asm/rwsem.h +++ b/arch/ia64/include/asm/rwsem.h @@ -33,7 +33,7 @@ /* * the semaphore definition */ -struct rw_semaphore { +struct rw_anon_semaphore { signed long count; spinlock_t wait_lock; struct list_head wait_list; @@ -51,26 +51,47 @@ struct rw_semaphore { LIST_HEAD_INIT((name).wait_list) } #define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); - -static inline void -init_rwsem (struct rw_semaphore *sem) + struct rw_anon_semaphore name = __RWSEM_INITIALIZER(name) + +extern struct rw_anon_semaphore * +rwsem_down_read_failed(struct rw_anon_semaphore *sem); +extern 
struct rw_anon_semaphore * +rwsem_down_write_failed(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_wake(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_downgrade_wake(struct rw_anon_semaphore *sem); + +static inline void init_anon_rwsem (struct rw_anon_semaphore *sem) { sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); } +struct rw_anon_semaphore { + signed long count; + spinlock_t wait_lock; + struct list_head wait_list; +}; + +#define __RWSEM_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ + LIST_HEAD_INIT((name).wait_list) } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +static inline void init_rwsem(struct rw_semaphore *sem) +{ + init_anon_rwsem((struct rw_anon_semaphore *)sem); +} + /* * lock for reading */ static inline void -__down_read (struct rw_semaphore *sem) +__down_read (struct rw_anon_semaphore *sem) { long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1); @@ -82,7 +103,7 @@ __down_read (struct rw_semaphore *sem) * lock for writing */ static inline void -__down_write (struct rw_semaphore *sem) +__down_write (struct rw_anon_semaphore *sem) { long old, new; @@ -99,7 +120,7 @@ __down_write (struct rw_semaphore *sem) * unlock after reading */ static inline void -__up_read (struct rw_semaphore *sem) +__up_read (struct rw_anon_semaphore *sem) { long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1); @@ -111,7 +132,7 @@ __up_read (struct rw_semaphore *sem) * unlock after writing */ static inline void -__up_write (struct rw_semaphore *sem) +__up_write (struct rw_anon_semaphore *sem) { long old, new; @@ -128,7 +149,7 @@ __up_write (struct rw_semaphore *sem) * trylock for reading -- returns 1 if successful, 0 if contention */ static inline int -__down_read_trylock (struct rw_semaphore *sem) +__down_read_trylock (struct rw_anon_semaphore *sem) { long tmp; while ((tmp = sem->count) >= 0) { @@ -143,7 +164,7 @@ __down_read_trylock (struct rw_semaphore *sem) * trylock for writing -- returns 1 if successful, 0 if contention */ static inline int -__down_write_trylock (struct rw_semaphore *sem) +__down_write_trylock (struct rw_anon_semaphore *sem) { long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS); @@ -154,7 +175,7 @@ __down_write_trylock (struct rw_semaphore *sem) * downgrade write lock to read lock */ static inline void -__downgrade_write (struct rw_semaphore *sem) +__downgrade_write (struct rw_anon_semaphore *sem) { long old, new; @@ -174,6 +195,11 @@ __downgrade_write (struct rw_semaphore *sem) #define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count)) #define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count)) +static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem) +{ + return (sem->count != 0); +} + static inline int rwsem_is_locked(struct rw_semaphore *sem) { return (sem->count != 0); diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index 20d8a39..85d965c 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h @@ -236,22 +236,22 @@ do { \ __tlb_remove_tlb_entry(tlb, ptep, addr); \ } while (0) -#define pte_free_tlb(tlb, ptep) \ +#define pte_free_tlb(tlb, ptep, address) \ do { \ tlb->need_flush = 1; \ - __pte_free_tlb(tlb, ptep); \ + __pte_free_tlb(tlb, ptep, address); \ } while (0) -#define pmd_free_tlb(tlb, ptep) \ +#define pmd_free_tlb(tlb, 
ptep, address) \ do { \ tlb->need_flush = 1; \ - __pmd_free_tlb(tlb, ptep); \ + __pmd_free_tlb(tlb, ptep, address); \ } while (0) -#define pud_free_tlb(tlb, pudp) \ +#define pud_free_tlb(tlb, pudp, address) \ do { \ tlb->need_flush = 1; \ - __pud_free_tlb(tlb, pudp); \ + __pud_free_tlb(tlb, pudp, address); \ } while (0) #endif /* _ASM_IA64_TLB_H */ diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 7d89512..26f5123 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); return 0; diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index dd9d7b5..70763a0 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) desc = irq_desc + irq; cfg = irq_cfg + irq; - spin_lock(&desc->lock); + atomic_spin_lock(&desc->lock); if (!cfg->move_cleanup_count) goto unlock; @@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&vector_lock, flags); cfg->move_cleanup_count--; unlock: - spin_unlock(&desc->lock); + atomic_spin_unlock(&desc->lock); } return IRQ_HANDLED; } diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index e6676fc..414a22e 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -643,7 +643,7 @@ salinfo_init(void) for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { data = salinfo_data + i; data->type = i; - init_MUTEX(&data->mutex); + semaphore_init(&data->mutex); dir = proc_mkdir(salinfo_log_name[i], salinfo_dir); if (!dir) continue; diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 4990495..c6e8a37 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -197,10 +197,10 @@ timer_interrupt (int irq, void *dev_id) * another CPU. We need to avoid to SMP race by acquiring the * xtime_lock. 
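/*
 * Illustrative sketch, not taken from the patch: the show_interrupts()
 * hunks in this series all make the same substitution, since the per-IRQ
 * descriptor lock becomes an atomic (non-sleeping) spinlock.  A condensed
 * version of that pattern, with a hypothetical function name:
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>

static void demo_show_one_irq(struct seq_file *p, int i)
{
	struct irqaction *action;
	unsigned long flags;

	atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
	action = irq_desc[i].action;
	if (action)
		seq_printf(p, "%3d: %s\n", i, action->name);
	atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}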
*/ - write_seqlock(&xtime_lock); + write_atomic_seqlock(&xtime_lock); do_timer(1); local_cpu_data->itm_next = new_itm; - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); } else local_cpu_data->itm_next = new_itm; @@ -477,7 +477,7 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c) { unsigned long flags; - write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags); + write_atomic_seqlock_irqsave(&fsyscall_gtod_data.lock, flags); /* copy fsyscall clock data */ fsyscall_gtod_data.clk_mask = c->mask; @@ -500,6 +500,6 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c) fsyscall_gtod_data.monotonic_time.tv_sec++; } - write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags); + write_atomic_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags); } diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c index fb83326..7ec3f56 100644 --- a/arch/ia64/xen/time.c +++ b/arch/ia64/xen/time.c @@ -141,10 +141,10 @@ consider_steal_time(unsigned long new_itm) delta_itm += local_cpu_data->itm_delta * (stolen + blocked); if (cpu == time_keeper_id) { - write_seqlock(&xtime_lock); + write_atomic_seqlock(&xtime_lock); do_timer(stolen + blocked); local_cpu_data->itm_next = delta_itm + new_itm; - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); } else { local_cpu_data->itm_next = delta_itm + new_itm; } diff --git a/arch/m32r/include/asm/pgalloc.h b/arch/m32r/include/asm/pgalloc.h index f11a2b9..0fc7361 100644 --- a/arch/m32r/include/asm/pgalloc.h +++ b/arch/m32r/include/asm/pgalloc.h @@ -58,7 +58,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) __free_page(pte); } -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) +#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte)) /* * allocating and freeing a pmd is trivial: the 1-entry pmd is @@ -68,7 +68,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) #define pmd_free(mm, x) do { } while (0) -#define __pmd_free_tlb(tlb, x) do { } while (0) +#define __pmd_free_tlb(tlb, x, addr) do { } while (0) #define pgd_populate(mm, pmd, pte) BUG() #define check_pgt_cache() do { } while (0) diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index 8dfd31e..351e82d 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c @@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c index cada3ba..3e00242 100644 --- a/arch/m32r/kernel/time.c +++ b/arch/m32r/kernel/time.c @@ -106,7 +106,7 @@ void do_gettimeofday(struct timeval *tv) unsigned long max_ntp_tick = tick_usec - tickadj; do { - seq = read_seqbegin(&xtime_lock); + seq = read_atomic_seqbegin(&xtime_lock); usec = do_gettimeoffset(); @@ -120,7 +120,7 @@ void do_gettimeofday(struct timeval *tv) sec = xtime.tv_sec; usec += (xtime.tv_nsec / 1000); - } while (read_seqretry(&xtime_lock, seq)); + } while (read_atomic_seqretry(&xtime_lock, seq)); while (usec >= 1000000) { usec -= 1000000; @@ -141,7 +141,7 @@ int do_settimeofday(struct timespec *tv) if ((unsigned long)tv->tv_nsec 
>= NSEC_PER_SEC) return -EINVAL; - write_seqlock_irq(&xtime_lock); + write_atomic_seqlock_irq(&xtime_lock); /* * This is revolting. We need to set "xtime" correctly. However, the * value in this location is the value at the most recent update of @@ -157,7 +157,7 @@ int do_settimeofday(struct timespec *tv) set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); ntp_clear(); - write_sequnlock_irq(&xtime_lock); + write_atomic_sequnlock_irq(&xtime_lock); clock_was_set(); return 0; @@ -202,7 +202,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be * called as close as possible to 500 ms before the new second starts. */ - write_seqlock(&xtime_lock); + write_atomic_seqlock(&xtime_lock); if (ntp_synced() && xtime.tv_sec > last_rtc_update + 660 && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2 @@ -213,7 +213,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) else /* do it again in 60 s */ last_rtc_update = xtime.tv_sec - 600; } - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); /* As we return to user mode fire off the other CPU schedulers.. this is basically because we don't yet share IRQ's around. This message is rigged to be safe on the 386 - basically it's diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h index d08bf62..15ee4c7 100644 --- a/arch/m68k/include/asm/motorola_pgalloc.h +++ b/arch/m68k/include/asm/motorola_pgalloc.h @@ -54,7 +54,8 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page) __free_page(page); } -static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page) +static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, + unsigned long address) { pgtable_page_dtor(page); cache_page(kmap(page)); @@ -73,7 +74,8 @@ static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd) return free_pointer_table(pmd); } -static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) +static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, + unsigned long address) { return free_pointer_table(pmd); } diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h index d4c83f1..48d80d5 100644 --- a/arch/m68k/include/asm/sun3_pgalloc.h +++ b/arch/m68k/include/asm/sun3_pgalloc.h @@ -32,7 +32,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page) __free_page(page); } -#define __pte_free_tlb(tlb,pte) \ +#define __pte_free_tlb(tlb,pte,addr) \ do { \ pgtable_page_dtor(pte); \ tlb_remove_page((tlb), pte); \ @@ -80,7 +80,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page * inside the pgd, so has no extra memory associated with it. 
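/*
 * Illustrative sketch, not taken from the patch: the pgalloc/tlb hunks in
 * this series add a third "address" parameter to the __pte/__pmd/__pud
 * free hooks so the generic mm code can pass down the virtual address of
 * the range being torn down.  Most of the architectures touched here do
 * not need it and simply ignore it, as in this hypothetical arch header
 * fragment:
 */
#define __pte_free_tlb(tlb, pte, address)	\
do {						\
	pgtable_page_dtor(pte);			\
	tlb_remove_page((tlb), (pte));		\
} while (0)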
*/ #define pmd_free(mm, x) do { } while (0) -#define __pmd_free_tlb(tlb, x) do { } while (0) +#define __pmd_free_tlb(tlb, x, addr) do { } while (0) static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 54d9807..612259c 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c @@ -102,7 +102,7 @@ void do_gettimeofday(struct timeval *tv) unsigned long max_ntp_tick = tick_usec - tickadj; do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); + seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags); usec = mach_gettimeoffset(); @@ -116,7 +116,7 @@ void do_gettimeofday(struct timeval *tv) sec = xtime.tv_sec; usec += xtime.tv_nsec/1000; - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); + } while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags)); while (usec >= 1000000) { @@ -138,7 +138,7 @@ int do_settimeofday(struct timespec *tv) if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; - write_seqlock_irq(&xtime_lock); + write_atomic_seqlock_irq(&xtime_lock); /* This is revolting. We need to set the xtime.tv_nsec * correctly. However, the value in this location is * is value at the last tick. @@ -154,7 +154,7 @@ int do_settimeofday(struct timespec *tv) set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); ntp_clear(); - write_sequnlock_irq(&xtime_lock); + write_atomic_sequnlock_irq(&xtime_lock); clock_was_set(); return 0; } diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c index d182b2f..d3c646d 100644 --- a/arch/m68knommu/kernel/time.c +++ b/arch/m68knommu/kernel/time.c @@ -44,11 +44,11 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy) if (current->pid) profile_tick(CPU_PROFILING); - write_seqlock(&xtime_lock); + write_atomic_seqlock(&xtime_lock); do_timer(1); - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); #ifndef CONFIG_SMP update_process_times(user_mode(get_irq_regs())); diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile index d0bcf80..8439598 100644 --- a/arch/microblaze/Makefile +++ b/arch/microblaze/Makefile @@ -6,14 +6,16 @@ endif # What CPU vesion are we building for, and crack it open # as major.minor.rev -CPU_VER=$(subst ",,$(CONFIG_XILINX_MICROBLAZE0_HW_VER) ) -CPU_MAJOR=$(shell echo $(CPU_VER) | cut -d '.' -f 1) -CPU_MINOR=$(shell echo $(CPU_VER) | cut -d '.' -f 2) -CPU_REV=$(shell echo $(CPU_VER) | cut -d '.' -f 3) +CPU_VER := $(shell echo $(CONFIG_XILINX_MICROBLAZE0_HW_VER)) +CPU_MAJOR := $(shell echo $(CPU_VER) | cut -d '.' -f 1) +CPU_MINOR := $(shell echo $(CPU_VER) | cut -d '.' -f 2) +CPU_REV := $(shell echo $(CPU_VER) | cut -d '.' -f 3) export CPU_VER CPU_MAJOR CPU_MINOR CPU_REV # Use cpu-related CONFIG_ vars to set compile options. +# The various CONFIG_XILINX cpu features options are integers 0/1/2... +# rather than bools y/n # Work out HW multipler support. This is icky. # 1. Spartan2 has no HW multiplers. @@ -34,30 +36,29 @@ CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) += -mxl-pattern-compare CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER)) -# The various CONFIG_XILINX cpu features options are integers 0/1/2... 
-# rather than bools y/n - # r31 holds current when in kernel mode -CFLAGS_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2) +KBUILD_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2) LDFLAGS := LDFLAGS_vmlinux := -LDFLAGS_BLOB := --format binary --oformat elf32-microblaze -LIBGCC := $(shell $(CC) $(CFLAGS_KERNEL) -print-libgcc-file-name) +LIBGCC := $(shell $(CC) $(KBUILD_KERNEL) -print-libgcc-file-name) -head-y := arch/microblaze/kernel/head.o -libs-y += arch/microblaze/lib/ $(LIBGCC) -core-y += arch/microblaze/kernel/ arch/microblaze/mm/ \ - arch/microblaze/platform/ +head-y := arch/microblaze/kernel/head.o +libs-y += arch/microblaze/lib/ +libs-y += $(LIBGCC) +core-y += arch/microblaze/kernel/ +core-y += arch/microblaze/mm/ +core-y += arch/microblaze/platform/ -boot := arch/$(ARCH)/boot +boot := arch/microblaze/boot # defines filename extension depending memory management type ifeq ($(CONFIG_MMU),) -MMUEXT := -nommu +MMU := -nommu endif -export MMUEXT + +export MMU all: linux.bin diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index 5c17342..7c3ec13 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h @@ -14,7 +14,6 @@ #include #include #include -#include #include /* Get struct page {...} */ diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h index 59a757e..b0131da 100644 --- a/arch/microblaze/include/asm/pgalloc.h +++ b/arch/microblaze/include/asm/pgalloc.h @@ -180,7 +180,7 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage) __free_page(ptepage); } -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) +#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte)) #define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte)) @@ -193,7 +193,7 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage) */ #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) /*#define pmd_free(mm, x) do { } while (0)*/ -#define __pmd_free_tlb(tlb, x) do { } while (0) +#define __pmd_free_tlb(tlb, x, addr) do { } while (0) #define pgd_populate(mm, pmd, pte) BUG() extern int do_check_pgt_cache(int, int); diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 4c57a58..cc3a4df 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -185,6 +185,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } /* Definitions for MicroBlaze. 
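/*
 * Illustrative sketch, not taken from the patch: the pgtable.h hunk just
 * below reserves _PAGE_FILE in not-present PTEs and makes pgoff_to_pte()
 * set it, so pte_file() can tell nonlinear-file PTEs apart from swap
 * entries.  A quick round-trip check (demo_nonlinear_pte_roundtrip is a
 * hypothetical name; off must fit in PTE_FILE_MAX_BITS):
 */
#include <linux/kernel.h>
#include <asm/pgtable.h>

static void demo_nonlinear_pte_roundtrip(unsigned long off)
{
	pte_t pte = pgoff_to_pte(off);		/* encodes (off << 3) | _PAGE_FILE */

	BUG_ON(!pte_file(pte));			/* marker bit is set */
	BUG_ON(pte_to_pgoff(pte) != off);	/* and the offset survives */
}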
*/ #define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ +#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */ #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */ #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ @@ -320,8 +321,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -/* FIXME */ -static inline int pte_file(pte_t pte) { return 0; } +static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } @@ -488,7 +488,7 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address) /* Encode and decode a nonlinear file mapping entry */ #define PTE_FILE_MAX_BITS 29 #define pte_to_pgoff(pte) (pte_val(pte) >> 3) -#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) }) +#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE }) extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h index 20f7b3a..37e6f30 100644 --- a/arch/microblaze/include/asm/prom.h +++ b/arch/microblaze/include/asm/prom.h @@ -16,6 +16,18 @@ #define _ASM_MICROBLAZE_PROM_H #ifdef __KERNEL__ +/* Definitions used by the flattened device tree */ +#define OF_DT_HEADER 0xd00dfeed /* marker */ +#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */ +#define OF_DT_END_NODE 0x2 /* End node */ +#define OF_DT_PROP 0x3 /* Property: name off, size, content */ +#define OF_DT_NOP 0x4 /* nop */ +#define OF_DT_END 0x9 + +#define OF_DT_VERSION 0x10 + +#ifndef __ASSEMBLY__ + #include #include #include @@ -29,16 +41,6 @@ #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) -/* Definitions used by the flattened device tree */ -#define OF_DT_HEADER 0xd00dfeed /* marker */ -#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */ -#define OF_DT_END_NODE 0x2 /* End node */ -#define OF_DT_PROP 0x3 /* Property: name off, size, content */ -#define OF_DT_NOP 0x4 /* nop */ -#define OF_DT_END 0x9 - -#define OF_DT_VERSION 0x10 - /* * This is what gets passed to the kernel by prom_init or kexec * @@ -309,5 +311,6 @@ extern void __iomem *of_iomap(struct device_node *device, int index); */ #include +#endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_MICROBLAZE_PROM_H */ diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h index c472d28..e8abd4a 100644 --- a/arch/microblaze/include/asm/tlb.h +++ b/arch/microblaze/include/asm/tlb.h @@ -11,7 +11,7 @@ #ifndef _ASM_MICROBLAZE_TLB_H #define _ASM_MICROBLAZE_TLB_H -#define tlb_flush(tlb) do {} while (0) +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) #include diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 65adad6..5431b46 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -189,7 +189,7 @@ extern long strnlen_user(const char *src, long count); #define __put_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __gu_val = x; \ + __typeof__(*(ptr)) volatile __gu_val = (x); \ long __gu_err = 0; \ switch 
(sizeof(__gu_val)) { \ case 1: \ diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile index f4a5e19..d487729 100644 --- a/arch/microblaze/kernel/Makefile +++ b/arch/microblaze/kernel/Makefile @@ -17,4 +17,4 @@ obj-$(CONFIG_HEART_BEAT) += heartbeat.o obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o obj-$(CONFIG_MMU) += misc.o -obj-y += entry$(MMUEXT).o +obj-y += entry$(MMU).o diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c index 153f57c..c259786 100644 --- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c +++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c @@ -22,7 +22,7 @@ #define CI(c, p) { ci->c = PVR_##p(pvr); } #define err_printk(x) \ - early_printk("ERROR: Microblaze " x " - different for PVR and DTS\n"); + early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n"); void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) { diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c index 450ca6b..adb448f 100644 --- a/arch/microblaze/kernel/cpu/cpuinfo-static.c +++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c @@ -18,7 +18,7 @@ static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY; static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER; #define err_printk(x) \ - early_printk("ERROR: Microblaze " x "- different for kernel and DTS\n"); + early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n"); void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu) { diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c index a10bea1..c411c67 100644 --- a/arch/microblaze/kernel/cpu/cpuinfo.c +++ b/arch/microblaze/kernel/cpu/cpuinfo.c @@ -26,6 +26,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = { {"7.10.b", 0x09}, {"7.10.c", 0x0a}, {"7.10.d", 0x0b}, + {"7.20.a", 0x0c}, + {"7.20.b", 0x0d}, /* FIXME There is no keycode defined in MBV for these versions */ {"2.10.a", 0x10}, {"3.00.a", 0x20}, diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index e568d6e..e41c6ce 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -31,6 +31,7 @@ #include #include #include +#include /* for OF_DT_HEADER */ #ifdef CONFIG_MMU #include /* COMMAND_LINE_SIZE */ @@ -54,11 +55,19 @@ ENTRY(_start) andi r1, r1, ~2 mts rmsr, r1 -/* save fdt to kernel location */ -/* r7 stores pointer to fdt blob */ - beqi r7, no_fdt_arg +/* r7 may point to an FDT, or there may be one linked in. + if it's in r7, we've got to save it away ASAP. + We ensure r7 points to a valid FDT, just in case the bootloader + is broken or non-existent */ + beqi r7, no_fdt_arg /* NULL pointer? don't copy */ + lw r11, r0, r7 /* Does r7 point to a */ + rsubi r11, r11, OF_DT_HEADER /* valid FDT? 
*/ + beqi r11, _prepare_copy_fdt + or r7, r0, r0 /* clear R7 when not valid DTB */ + bnei r11, no_fdt_arg /* No - get out of here */ +_prepare_copy_fdt: or r11, r0, r0 /* incremment */ - ori r4, r0, TOPHYS(_fdt_start) /* save bram context */ + ori r4, r0, TOPHYS(_fdt_start) ori r3, r0, (0x4000 - 4) _copy_fdt: lw r12, r7, r11 /* r12 = r7 + r11 */ diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index 9d591cd..3288c97 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S @@ -74,6 +74,7 @@ #include #include +#include #include /* Helpful Macros */ @@ -428,19 +429,9 @@ handle_unaligned_ex: mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ nop _no_delayslot: -#endif - -#ifdef CONFIG_MMU - /* Check if unaligned address is last on a 4k page */ - andi r5, r4, 0xffc - xori r5, r5, 0xffc - bnei r5, _unaligned_ex2 - _unaligned_ex1: - RESTORE_STATE; -/* Another page must be accessed or physical address not in page table */ - bri unaligned_data_trap - - _unaligned_ex2: + /* jump to high level unaligned handler */ + RESTORE_STATE; + bri unaligned_data_trap #endif andi r6, r3, 0x3E0; /* Mask and extract the register operand */ srl r6, r6; /* r6 >> 5 */ @@ -450,45 +441,6 @@ _no_delayslot: srl r6, r6; /* Store the register operand in a temporary location */ sbi r6, r0, TOPHYS(ex_reg_op); -#ifdef CONFIG_MMU - /* Get physical address */ - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - ori r5, r0, CONFIG_KERNEL_START - cmpu r5, r4, r5 - bgti r5, _unaligned_ex3 - ori r5, r0, swapper_pg_dir - bri _unaligned_ex4 - - /* Get the PGD for the current thread. */ -_unaligned_ex3: /* user thread */ - addi r5 ,CURRENT_TASK, TOPHYS(0); /* get current task address */ - lwi r5, r5, TASK_THREAD + PGDIR -_unaligned_ex4: - tophys(r5,r5) - BSRLI(r6,r4,20) /* Create L1 (pgdir/pmd) address */ - andi r6, r6, 0xffc -/* Assume pgdir aligned on 4K boundary, no need for "andi r5,r5,0xfffff003" */ - or r5, r5, r6 - lwi r6, r5, 0 /* Get L1 entry */ - andi r5, r6, 0xfffff000 /* Extract L2 (pte) base address. 
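/*
 * Illustrative sketch, not taken from the patch: the ex_unaligned_fixup
 * path added just below leans on the standard exception-table mechanism -
 * rewind PT_PC to the original (faulting) instruction and let
 * bad_page_fault() look up its fixup.  In C, that lookup amounts to the
 * following (demo_fixup_exception is a hypothetical name):
 */
#include <linux/module.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>

static int demo_fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;	/* resume at the fixup stub */
		return 1;
	}
	return 0;				/* no fixup: genuine kernel fault */
}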
*/ - beqi r5, _unaligned_ex1 /* Bail if no table */ - - tophys(r5,r5) - BSRLI(r6,r4,10) /* Compute PTE address */ - andi r6, r6, 0xffc - andi r5, r5, 0xfffff003 - or r5, r5, r6 - lwi r5, r5, 0 /* Get Linux PTE */ - - andi r6, r5, _PAGE_PRESENT - beqi r6, _unaligned_ex1 /* Bail if no page */ - - andi r5, r5, 0xfffff000 /* Extract RPN */ - andi r4, r4, 0x00000fff /* Extract offset */ - or r4, r4, r5 /* Create physical address */ -#endif /* CONFIG_MMU */ andi r6, r3, 0x400; /* Extract ESR[S] */ bnei r6, ex_sw; @@ -959,15 +911,15 @@ _unaligned_data_exception: andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */ ex_lw_vm: beqid r6, ex_lhw_vm; - lbui r5, r4, 0; /* Exception address in r4 - delay slot */ +load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */ /* Load a word, byte-by-byte from destination address and save it in tmp space*/ la r6, r0, ex_tmp_data_loc_0; sbi r5, r6, 0; - lbui r5, r4, 1; +load2: lbui r5, r4, 1; sbi r5, r6, 1; - lbui r5, r4, 2; +load3: lbui r5, r4, 2; sbi r5, r6, 2; - lbui r5, r4, 3; +load4: lbui r5, r4, 3; sbi r5, r6, 3; brid ex_lw_tail_vm; /* Get the destination register value into r3 - delay slot */ @@ -977,7 +929,7 @@ ex_lhw_vm: * save it in tmp space */ la r6, r0, ex_tmp_data_loc_0; sbi r5, r6, 0; - lbui r5, r4, 1; +load5: lbui r5, r4, 1; sbi r5, r6, 1; lhui r3, r6, 0; /* Get the destination register value into r3 */ ex_lw_tail_vm: @@ -996,22 +948,53 @@ ex_sw_tail_vm: swi r3, r5, 0; /* Get the word - delay slot */ /* Store the word, byte-by-byte into destination address */ lbui r3, r5, 0; - sbi r3, r4, 0; +store1: sbi r3, r4, 0; lbui r3, r5, 1; - sbi r3, r4, 1; +store2: sbi r3, r4, 1; lbui r3, r5, 2; - sbi r3, r4, 2; +store3: sbi r3, r4, 2; lbui r3, r5, 3; brid ret_from_exc; - sbi r3, r4, 3; /* Delay slot */ +store4: sbi r3, r4, 3; /* Delay slot */ ex_shw_vm: /* Store the lower half-word, byte-by-byte into destination address */ lbui r3, r5, 2; - sbi r3, r4, 0; +store5: sbi r3, r4, 0; lbui r3, r5, 3; brid ret_from_exc; - sbi r3, r4, 1; /* Delay slot */ +store6: sbi r3, r4, 1; /* Delay slot */ ex_sw_end_vm: /* Exception handling of store word, ends. */ + +/* We have to prevent cases that get/put_user macros get unaligned pointer + * to bad page area. 
We have to find out which origin instruction caused it + * and called fixup for that origin instruction not instruction in unaligned + * handler */ +ex_unaligned_fixup: + ori r5, r7, 0 /* setup pointer to pt_regs */ + lwi r6, r7, PT_PC; /* faulting address is one instruction above */ + addik r6, r6, -4 /* for finding proper fixup */ + swi r6, r7, PT_PC; /* a save back it to PT_PC */ + addik r7, r0, SIGSEGV + /* call bad_page_fault for finding aligned fixup, fixup address is saved + * in PT_PC which is used as return address from exception */ + la r15, r0, ret_from_exc-8 /* setup return address */ + brid bad_page_fault + nop + +/* We prevent all load/store because it could failed any attempt to access */ +.section __ex_table,"a"; + .word load1,ex_unaligned_fixup; + .word load2,ex_unaligned_fixup; + .word load3,ex_unaligned_fixup; + .word load4,ex_unaligned_fixup; + .word load5,ex_unaligned_fixup; + .word store1,ex_unaligned_fixup; + .word store2,ex_unaligned_fixup; + .word store3,ex_unaligned_fixup; + .word store4,ex_unaligned_fixup; + .word store5,ex_unaligned_fixup; + .word store6,ex_unaligned_fixup; +.previous; .end _unaligned_data_exception #endif /* CONFIG_MMU */ diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index f688ee9..2c63c87 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c @@ -77,7 +77,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < nr_irq) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -98,7 +98,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c index 5141417..5a45b1a 100644 --- a/arch/microblaze/kernel/module.c +++ b/arch/microblaze/kernel/module.c @@ -57,7 +57,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; Elf32_Sym *sym; unsigned long int *location; - unsigned long int locoffs; unsigned long int value; #if __GNUC__ < 4 unsigned long int old_value; @@ -113,10 +112,12 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, break; case R_MICROBLAZE_64_PCREL: - locoffs = (location[0] & 0xFFFF) << 16 | +#if __GNUC__ < 4 + old_value = (location[0] & 0xFFFF) << 16 | (location[1] & 0xFFFF); - value -= (unsigned long int)(location) + 4 + - locoffs; + value -= old_value; +#endif + value -= (unsigned long int)(location) + 4; location[0] = (location[0] & 0xFFFF0000) | (value >> 16); location[1] = (location[1] & 0xFFFF0000) | @@ -125,6 +126,14 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, value); break; + case R_MICROBLAZE_32_PCREL_LO: + pr_debug("R_MICROBLAZE_32_PCREL_LO\n"); + break; + + case R_MICROBLAZE_64_NONE: + pr_debug("R_MICROBLAZE_NONE\n"); + break; + case R_MICROBLAZE_NONE: pr_debug("R_MICROBLAZE_NONE\n"); break; @@ -133,7 +142,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, printk(KERN_ERR "module %s: " "Unknown relocation: %u\n", module->name, - ELF32_R_TYPE(rela->r_info)); + ELF32_R_TYPE(rela[i].r_info)); return -ENOEXEC; } } diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index 8709bea..2a97bf5 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -138,8 +138,12 @@ void __init 
machine_early_init(const char *cmdline, unsigned int ram, setup_early_printk(NULL); #endif - early_printk("Ramdisk addr 0x%08x, FDT 0x%08x\n", ram, fdt); - printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt); + early_printk("Ramdisk addr 0x%08x, ", ram); + if (fdt) + early_printk("FDT at 0x%08x\n", fdt); + else + early_printk("Compiled-in FDT at 0x%08x\n", + (unsigned int)_fdt_start); #ifdef CONFIG_MTD_UCLINUX early_printk("Found romfs @ 0x%08x (0x%08x)\n", diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c index e000bce..b96f168 100644 --- a/arch/microblaze/kernel/sys_microblaze.c +++ b/arch/microblaze/kernel/sys_microblaze.c @@ -33,105 +33,6 @@ #include #include -/* - * sys_ipc() is the de-multiplexer for the SysV IPC calls.. - * - * This is really horribly ugly. This will be remove with new toolchain. - */ -asmlinkage long -sys_ipc(uint call, int first, int second, int third, void *ptr, long fifth) -{ - int version, ret; - - version = call >> 16; /* hack for backward compatibility */ - call &= 0xffff; - - ret = -EINVAL; - switch (call) { - case SEMOP: - ret = sys_semop(first, (struct sembuf *)ptr, second); - break; - case SEMGET: - ret = sys_semget(first, second, third); - break; - case SEMCTL: - { - union semun fourth; - - if (!ptr) - break; - ret = (access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT) - || (get_user(fourth.__pad, (void **)ptr)) ; - if (ret) - break; - ret = sys_semctl(first, second, third, fourth); - break; - } - case MSGSND: - ret = sys_msgsnd(first, (struct msgbuf *) ptr, second, third); - break; - case MSGRCV: - switch (version) { - case 0: { - struct ipc_kludge tmp; - - if (!ptr) - break; - ret = (access_ok(VERIFY_READ, ptr, sizeof(tmp)) - ? 0 : -EFAULT) || copy_from_user(&tmp, - (struct ipc_kludge *) ptr, sizeof(tmp)); - if (ret) - break; - ret = sys_msgrcv(first, tmp.msgp, second, tmp.msgtyp, - third); - break; - } - default: - ret = sys_msgrcv(first, (struct msgbuf *) ptr, - second, fifth, third); - break; - } - break; - case MSGGET: - ret = sys_msgget((key_t) first, second); - break; - case MSGCTL: - ret = sys_msgctl(first, second, (struct msqid_ds *) ptr); - break; - case SHMAT: - switch (version) { - default: { - ulong raddr; - ret = access_ok(VERIFY_WRITE, (ulong *) third, - sizeof(ulong)) ? 
0 : -EFAULT; - if (ret) - break; - ret = do_shmat(first, (char *) ptr, second, &raddr); - if (ret) - break; - ret = put_user(raddr, (ulong *) third); - break; - } - case 1: /* iBCS2 emulator entry point */ - if (!segment_eq(get_fs(), get_ds())) - break; - ret = do_shmat(first, (char *) ptr, second, - (ulong *) third); - break; - } - break; - case SHMDT: - ret = sys_shmdt((char *)ptr); - break; - case SHMGET: - ret = sys_shmget(first, second, third); - break; - case SHMCTL: - ret = sys_shmctl(first, second, (struct shmid_ds *) ptr); - break; - } - return ret; -} asmlinkage long microblaze_vfork(struct pt_regs *regs) { diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index 31b32a6..216db81 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ -121,7 +121,7 @@ ENTRY(sys_call_table) .long sys_wait4 .long sys_swapoff /* 115 */ .long sys_sysinfo - .long sys_ipc + .long sys_ni_syscall /* old sys_ipc */ .long sys_fsync .long sys_ni_syscall /* sys_sigreturn_wrapper */ .long sys_clone /* 120 */ diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index 956607a..d9d249a 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -69,7 +69,7 @@ static int store_updates_sp(struct pt_regs *regs) * It is called from do_page_fault above and from some of the procedures * in traps.c. */ -static void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) +void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) { const struct exception_table_entry *fixup; /* MS: no context */ @@ -122,15 +122,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, } #endif /* CONFIG_KGDB */ - if (in_atomic() || mm == NULL) { - /* FIXME */ - if (kernel_mode(regs)) { - printk(KERN_EMERG - "Page fault in kernel mode - Oooou!!! pid %d\n", - current->pid); - _exception(SIGSEGV, regs, code, address); - return; - } + if (in_atomic() || !mm) { + if (kernel_mode(regs)) + goto bad_area_nosemaphore; + /* in_atomic() in user mode is really bad, as is current->mm == NULL. */ printk(KERN_EMERG "Page fault in user mode with " diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h index 032ca73..ed7899f 100644 --- a/arch/mips/include/asm/i8253.h +++ b/arch/mips/include/asm/i8253.h @@ -12,7 +12,7 @@ #define PIT_CH0 0x40 #define PIT_CH2 0x42 -extern spinlock_t i8253_lock; +extern atomic_spinlock_t i8253_lock; extern void setup_pit_timer(void); diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 1275831..3738f4b 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -98,23 +98,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) __free_pages(pte, PTE_ORDER); } -#define __pte_free_tlb(tlb,pte) \ +#define __pte_free_tlb(tlb,pte,address) \ do { \ pgtable_page_dtor(pte); \ tlb_remove_page((tlb), pte); \ } while (0) -#ifdef CONFIG_32BIT - -/* - * allocating and freeing a pmd is trivial: the 1-entry pmd is - * inside the pgd, so has no extra memory associated with it. 
- */ -#define pmd_free(mm, x) do { } while (0) -#define __pmd_free_tlb(tlb, x) do { } while (0) - -#endif - #ifdef CONFIG_64BIT static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) @@ -132,7 +121,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) free_pages((unsigned long)pmd, PMD_ORDER); } -#define __pmd_free_tlb(tlb, x) pmd_free((tlb)->mm, x) +#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x) #endif diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index f7d8d5d..4ac943d 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c @@ -15,7 +15,7 @@ #include #include -DEFINE_SPINLOCK(i8253_lock); +DEFINE_ATOMIC_SPINLOCK(i8253_lock); EXPORT_SYMBOL(i8253_lock); /* @@ -26,7 +26,7 @@ EXPORT_SYMBOL(i8253_lock); static void init_pit_timer(enum clock_event_mode mode, struct clock_event_device *evt) { - spin_lock(&i8253_lock); + atomic_spin_lock(&i8253_lock); switch(mode) { case CLOCK_EVT_MODE_PERIODIC: @@ -55,7 +55,7 @@ static void init_pit_timer(enum clock_event_mode mode, /* Nothing to do here */ break; } - spin_unlock(&i8253_lock); + atomic_spin_unlock(&i8253_lock); } /* @@ -65,10 +65,10 @@ static void init_pit_timer(enum clock_event_mode mode, */ static int pit_next_event(unsigned long delta, struct clock_event_device *evt) { - spin_lock(&i8253_lock); + atomic_spin_lock(&i8253_lock); outb_p(delta & 0xff , PIT_CH0); /* LSB */ outb(delta >> 8 , PIT_CH0); /* MSB */ - spin_unlock(&i8253_lock); + atomic_spin_unlock(&i8253_lock); return 0; } @@ -137,7 +137,7 @@ static cycle_t pit_read(struct clocksource *cs) static int old_count; static u32 old_jifs; - spin_lock_irqsave(&i8253_lock, flags); + atomic_spin_lock_irqsave(&i8253_lock, flags); /* * Although our caller may have the read side of xtime_lock, * this is now a seqlock, and we are cheating in this routine @@ -183,7 +183,7 @@ static cycle_t pit_read(struct clocksource *cs) old_count = count; old_jifs = jifs; - spin_unlock_irqrestore(&i8253_lock, flags); + atomic_spin_unlock_irqrestore(&i8253_lock, flags); count = (LATCH - 1) - count; diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 7b845ba..50a7451 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_putc(p, '\n'); seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 6751ce9..894927f 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -69,7 +69,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, * If we're in an interrupt or have no user * context, we must not take the fault.. 
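/*
 * Illustrative sketch, not taken from the patch: the page-fault hunks in
 * this series (microblaze above, mips just below) extend the usual "no
 * faults from atomic context" guard with the per-task pagefault_disabled
 * counter introduced elsewhere in the series.  Condensed, with a
 * hypothetical helper name:
 */
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/sched.h>

static int demo_may_handle_fault(struct mm_struct *mm)
{
	/*
	 * In an interrupt, without a user mm, or under pagefault_disable()
	 * the handler must not sleep on mmap_sem; callers branch to their
	 * bad_area_nosemaphore label instead.
	 */
	if (in_atomic() || !mm || current->pagefault_disabled)
		return 0;
	return 1;
}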
*/ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto bad_area_nosemaphore; down_read(&mm->mmap_sem); diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c index e274fda..c0c038b 100644 --- a/arch/mips/mm/highmem.c +++ b/arch/mips/mm/highmem.c @@ -45,7 +45,7 @@ void *__kmap_atomic(struct page *page, enum km_type type) enum fixed_addresses idx; unsigned long vaddr; - /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ + preempt_disable(); pagefault_disable(); if (!PageHighMem(page)) return page_address(page); @@ -71,6 +71,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type) if (vaddr < FIXADDR_START) { // FIXME pagefault_enable(); + preempt_enable(); return; } @@ -85,6 +86,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type) #endif pagefault_enable(); + preempt_enable(); } EXPORT_SYMBOL(__kunmap_atomic); @@ -97,6 +99,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) enum fixed_addresses idx; unsigned long vaddr; + preempt_disable(); pagefault_disable(); debug_kmap_atomic(type); diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c index 6d39e22..3da2ed2 100644 --- a/arch/mips/vr41xx/common/icu.c +++ b/arch/mips/vr41xx/common/icu.c @@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu1_set(MPIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MPIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu1_set(MAIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MAIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu1_set(MKIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MKIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask) struct irq_desc *desc = irq_desc + ETHERNET_IRQ; unsigned long flags; - 
spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu1_set(MMACINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_enable_macint); @@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask) struct irq_desc *desc = irq_desc + ETHERNET_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MMACINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_disable_macint); @@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask) struct irq_desc *desc = irq_desc + DSIU_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu1_set(MDSIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_enable_dsiuint); @@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mask) struct irq_desc *desc = irq_desc + DSIU_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MDSIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_disable_dsiuint); @@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask) struct irq_desc *desc = irq_desc + FIR_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu2_set(MFIRINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_enable_firint); @@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask) struct irq_desc *desc = irq_desc + FIR_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu2_clear(MFIRINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_disable_firint); @@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu2_write(MPCIINTREG, PCIINT0); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu2_write(MPCIINTREG, 0); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu2_write(MSCUINTREG, SCUINT0); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + 
atomic_spin_lock_irqsave(&desc->lock, flags); icu2_write(MSCUINTREG, 0); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu2_set(MCSIINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu2_clear(MCSIINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu2_write(MBCUINTREG, BCUINTR); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); icu2_write(MBCUINTREG, 0); - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -486,7 +486,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) pin = SYSINT1_IRQ_TO_PIN(irq); - spin_lock_irq(&desc->lock); + atomic_spin_lock_irq(&desc->lock); intassign0 = icu1_read(INTASSIGN0); intassign1 = icu1_read(INTASSIGN1); @@ -525,7 +525,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) intassign1 |= (uint16_t)assign << 9; break; default: - spin_unlock_irq(&desc->lock); + atomic_spin_unlock_irq(&desc->lock); return -EINVAL; } @@ -533,7 +533,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) icu1_write(INTASSIGN0, intassign0); icu1_write(INTASSIGN1, intassign1); - spin_unlock_irq(&desc->lock); + atomic_spin_unlock_irq(&desc->lock); return 0; } @@ -546,7 +546,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) pin = SYSINT2_IRQ_TO_PIN(irq); - spin_lock_irq(&desc->lock); + atomic_spin_lock_irq(&desc->lock); intassign2 = icu1_read(INTASSIGN2); intassign3 = icu1_read(INTASSIGN3); @@ -593,7 +593,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) intassign3 |= (uint16_t)assign << 12; break; default: - spin_unlock_irq(&desc->lock); + atomic_spin_unlock_irq(&desc->lock); return -EINVAL; } @@ -601,7 +601,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) icu1_write(INTASSIGN2, intassign2); icu1_write(INTASSIGN3, intassign3); - spin_unlock_irq(&desc->lock); + atomic_spin_unlock_irq(&desc->lock); return 0; } diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h index ec057e1..a19f113 100644 --- a/arch/mn10300/include/asm/pgalloc.h +++ b/arch/mn10300/include/asm/pgalloc.h @@ -51,6 +51,6 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte) } -#define 
__pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte)) +#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte)) #endif /* _ASM_PGALLOC_H */ diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c index 4c3c58e..c076cff 100644 --- a/arch/mn10300/kernel/irq.c +++ b/arch/mn10300/kernel/irq.c @@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p, void *v) /* display information rows, one per active CPU */ case 1 ... NR_IRQS - 1: - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (action) { @@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); } - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); break; /* polish off with NMI and error counters */ diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c index 395caf0..b25588c 100644 --- a/arch/mn10300/kernel/time.c +++ b/arch/mn10300/kernel/time.c @@ -99,7 +99,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) { unsigned tsc, elapse; - write_seqlock(&xtime_lock); + write_atomic_seqlock(&xtime_lock); while (tsc = get_cycles(), elapse = mn10300_last_tsc - tsc, /* time elapsed since last @@ -114,7 +114,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) check_rtc_time(); } - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); update_process_times(user_mode(get_irq_regs())); diff --git a/arch/parisc/include/asm/tlb.h b/arch/parisc/include/asm/tlb.h index 383b1db..0792490 100644 --- a/arch/parisc/include/asm/tlb.h +++ b/arch/parisc/include/asm/tlb.h @@ -21,7 +21,7 @@ do { if (!(tlb)->fullmm) \ #include -#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) +#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) +#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) #endif diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 330f536..5bbc62b 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v) if (i < NR_IRQS) { struct irqaction *action; - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index a79c6f9..908cfde 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -163,9 +163,9 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) } if (cpu == 0) { - write_seqlock(&xtime_lock); + write_atomic_seqlock(&xtime_lock); do_timer(ticks_elapsed); - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); } return IRQ_HANDLED; @@ -268,12 +268,12 @@ void __init time_init(void) if (pdc_tod_read(&tod_data) == 0) { unsigned long flags; - write_seqlock_irqsave(&xtime_lock, flags); + write_atomic_seqlock_irqsave(&xtime_lock, flags); xtime.tv_sec = tod_data.tod_sec; xtime.tv_nsec = tod_data.tod_usec * 1000; set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); - write_sequnlock_irqrestore(&xtime_lock, flags); + write_atomic_sequnlock_irqrestore(&xtime_lock, flags); } else { printk(KERN_ERR 
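/*
 * Illustrative sketch, not part of the patch: the generic mm code now passes
 * the virtual address being unmapped down to the page-table freeing hooks,
 * which is why __pte_free_tlb()/__pmd_free_tlb()/__pud_free_tlb() grow a
 * third argument on every architecture touched by this series.  An
 * architecture with no use for the address simply ignores it, e.g.:
 */
#define example__pte_free_tlb(tlb, ptepage, addr)  pte_free((tlb)->mm, (ptepage))
#define example__pmd_free_tlb(tlb, pmd, addr)      pmd_free((tlb)->mm, (pmd))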
"Error reading tod clock\n"); xtime.tv_sec = 0; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index d00131c..0b46b68 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -68,13 +68,6 @@ config LOCKDEP_SUPPORT bool default y -config RWSEM_GENERIC_SPINLOCK - bool - -config RWSEM_XCHGADD_ALGORITHM - bool - default y - config GENERIC_LOCKBREAK bool default y @@ -252,6 +245,14 @@ config HIGHMEM source kernel/time/Kconfig source kernel/Kconfig.hz source kernel/Kconfig.preempt + +config RWSEM_GENERIC_SPINLOCK + bool + default y + +config RWSEM_XCHGADD_ALGORITHM + bool + source "fs/Kconfig.binfmt" config HUGETLB_PAGE_SIZE_VARIABLE diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h index a002682..582e47d 100644 --- a/arch/powerpc/include/asm/mpic.h +++ b/arch/powerpc/include/asm/mpic.h @@ -289,7 +289,7 @@ struct mpic #ifdef CONFIG_MPIC_U3_HT_IRQS /* The fixup table */ struct mpic_irq_fixup *fixups; - spinlock_t fixup_lock; + atomic_spinlock_t fixup_lock; #endif /* Register access method */ diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 0815eb4..c9500d6 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h @@ -16,7 +16,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); */ /* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */ #define pmd_free(mm, x) do { } while (0) -#define __pmd_free_tlb(tlb,x) do { } while (0) +#define __pmd_free_tlb(tlb,x,a) do { } while (0) /* #define pgd_populate(mm, pmd, pte) BUG() */ #ifndef CONFIG_BOOKE diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index afda2bd..e6f069c 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -118,11 +118,11 @@ static inline void pgtable_free(pgtable_free_t pgf) kmem_cache_free(pgtable_cache[cachenum], p); } -#define __pmd_free_tlb(tlb, pmd) \ +#define __pmd_free_tlb(tlb, pmd,addr) \ pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) #ifndef CONFIG_PPC_64K_PAGES -#define __pud_free_tlb(tlb, pud) \ +#define __pud_free_tlb(tlb, pud, addr) \ pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) #endif /* CONFIG_PPC_64K_PAGES */ diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index 5d84802..1730e5e 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h @@ -38,14 +38,14 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum, extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); #ifdef CONFIG_SMP -#define __pte_free_tlb(tlb,ptepage) \ +#define __pte_free_tlb(tlb,ptepage,address) \ do { \ pgtable_page_dtor(ptepage); \ pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ - PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ + PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ } while (0) #else -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) +#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, (pte)) #endif diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 8cd083c..a1ddb07 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -202,8 +202,15 @@ static inline unsigned long pte_update(struct mm_struct *mm, assert_pte_locked(mm, addr); #ifdef CONFIG_PPC_STD_MMU_64 - if (old & _PAGE_HASHPTE) + if (old & _PAGE_HASHPTE) { +#ifdef 
CONFIG_PREEMPT_RT + preempt_disable(); +#endif hpte_need_flush(mm, addr, ptep, old, huge); +#ifdef CONFIG_PREEMPT_RT + preempt_enable(); +#endif + } #endif return old; diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h index 877c35a..ba11723 100644 --- a/arch/powerpc/include/asm/pmac_feature.h +++ b/arch/powerpc/include/asm/pmac_feature.h @@ -378,7 +378,7 @@ extern struct macio_chip* macio_find(struct device_node* child, int type); * Those are exported by pmac feature for internal use by arch code * only like the platform function callbacks, do not use directly in drivers */ -extern spinlock_t feature_lock; +extern atomic_spinlock_t feature_lock; extern struct device_node *uninorth_node; extern u32 __iomem *uninorth_base; diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h index 24cd928..c2494d4 100644 --- a/arch/powerpc/include/asm/rwsem.h +++ b/arch/powerpc/include/asm/rwsem.h @@ -21,7 +21,7 @@ /* * the semaphore definition */ -struct rw_semaphore { +struct rw_anon_semaphore { /* XXX this should be able to be an atomic_t -- paulus */ signed int count; #define RWSEM_UNLOCKED_VALUE 0x00000000 @@ -38,43 +38,47 @@ struct rw_semaphore { }; #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +# define __RWSEM_ANON_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } #else -# define __RWSEM_DEP_MAP_INIT(lockname) +# define __RWSEM_ANON_DEP_MAP_INIT(lockname) #endif -#define __RWSEM_INITIALIZER(name) \ +#define __RWSEM_ANON_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } + LIST_HEAD_INIT((name).wait_list) __RWSEM_ANON_DEP_MAP_INIT(name) } -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) +#define DECLARE_ANON_RWSEM(name) \ + struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name) -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_down_read_failed(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_down_write_failed(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_wake(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_downgrade_wake(struct rw_anon_semaphore *sem); -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, +extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name, struct lock_class_key *key); -#define init_rwsem(sem) \ +#define init_anon_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ - __init_rwsem((sem), #sem, &__key); \ + __init_anon_rwsem((sem), #sem, &__key); \ } while (0) /* * lock for reading */ -static inline void __down_read(struct rw_semaphore *sem) +static inline void __down_read(struct rw_anon_semaphore *sem) { if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0)) rwsem_down_read_failed(sem); } -static inline int __down_read_trylock(struct rw_semaphore *sem) +static inline int __down_read_trylock(struct rw_anon_semaphore *sem) { int tmp; @@ -90,7 +94,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) /* * lock for writing */ -static inline void 
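/*
 * Illustrative sketch, not part of the patch: hpte_need_flush() queues work
 * into the per-CPU ppc64_tlb_batch, so the caller must stay on one CPU while
 * it runs.  On PREEMPT_RT the page-table lock no longer disables preemption
 * implicitly, hence the explicit guard the pte_update() hunk above adds.  A
 * hypothetical wrapper expressing the same idea:
 */
static inline void example_hpte_flush_pinned(struct mm_struct *mm,
					     unsigned long addr, pte_t *ptep,
					     unsigned long old, int huge)
{
#ifdef CONFIG_PREEMPT_RT
	preempt_disable();		/* pin to this CPU's tlb batch */
#endif
	hpte_need_flush(mm, addr, ptep, old, huge);
#ifdef CONFIG_PREEMPT_RT
	preempt_enable();
#endif
}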
__down_write_nested(struct rw_semaphore *sem, int subclass) +static inline void __down_write_nested(struct rw_anon_semaphore *sem, int subclass) { int tmp; @@ -100,12 +104,12 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) rwsem_down_write_failed(sem); } -static inline void __down_write(struct rw_semaphore *sem) +static inline void __down_write(struct rw_anon_semaphore *sem) { __down_write_nested(sem, 0); } -static inline int __down_write_trylock(struct rw_semaphore *sem) +static inline int __down_write_trylock(struct rw_anon_semaphore *sem) { int tmp; @@ -117,7 +121,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) /* * unlock after reading */ -static inline void __up_read(struct rw_semaphore *sem) +static inline void __up_read(struct rw_anon_semaphore *sem) { int tmp; @@ -129,7 +133,7 @@ static inline void __up_read(struct rw_semaphore *sem) /* * unlock after writing */ -static inline void __up_write(struct rw_semaphore *sem) +static inline void __up_write(struct rw_anon_semaphore *sem) { if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, (atomic_t *)(&sem->count)) < 0)) @@ -139,7 +143,7 @@ static inline void __up_write(struct rw_semaphore *sem) /* * implement atomic add functionality */ -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +static inline void rwsem_atomic_add(int delta, struct rw_anon_semaphore *sem) { atomic_add(delta, (atomic_t *)(&sem->count)); } @@ -147,7 +151,7 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) /* * downgrade write lock to read lock */ -static inline void __downgrade_write(struct rw_semaphore *sem) +static inline void __downgrade_write(struct rw_anon_semaphore *sem) { int tmp; @@ -159,15 +163,59 @@ static inline void __downgrade_write(struct rw_semaphore *sem) /* * implement exchange and add functionality */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +static inline int rwsem_atomic_update(int delta, struct rw_anon_semaphore *sem) { return atomic_add_return(delta, (atomic_t *)(&sem->count)); } +static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem) +{ + return (sem->count != 0); +} + +#ifndef CONFIG_PREEMPT_RT + +struct rw_semaphore { + /* XXX this should be able to be an atomic_t -- paulus */ + signed int count; + spinlock_t wait_lock; + struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + +#define __RWSEM_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ + LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +static inline void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) +{ + __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key); +} + +#define init_rwsem(sem) \ + do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ + } while (0) + static inline int rwsem_is_locked(struct rw_semaphore *sem) { return (sem->count != 0); } +#endif #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_RWSEM_H */ diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index e20ff75..3ddc8f6 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ 
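/*
 * Illustrative sketch, not part of the patch: the rwsem rework above (and the
 * matching Kconfig move to the generic spinlock rwsem) keeps the classic
 * counting semaphore available as rw_anon_semaphore, while
 * "struct rw_semaphore" is only defined when PREEMPT_RT is off; the RT
 * configuration substitutes a different rw_semaphore elsewhere in the
 * series.  Because the two structures share the same layout, the non-RT
 * wrapper forwards by cast, and anonymous users declare the new type
 * directly:
 */
static DECLARE_ANON_RWSEM(example_anon_sem);	/* the anon flavour always exists */

#ifndef CONFIG_PREEMPT_RT
static void example_init_compat(struct rw_semaphore *sem,
				struct lock_class_key *key)
{
	/* identical layout => safe to reuse the anon implementation */
	__init_anon_rwsem((struct rw_anon_semaphore *)sem, "example", key);
}
#endif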
-30,26 +30,38 @@ struct mmu_gather; #define tlb_start_vma(tlb, vma) do { } while (0) #define tlb_end_vma(tlb, vma) do { } while (0) +#define HAVE_ARCH_MMU_GATHER 1 + +struct pte_freelist_batch; + +struct arch_mmu_gather { + struct pte_freelist_batch *batch; +}; + +#define ARCH_MMU_GATHER_INIT (struct arch_mmu_gather){ .batch = NULL, } + #if !defined(CONFIG_PPC_STD_MMU) #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) #elif defined(__powerpc64__) -extern void pte_free_finish(void); +extern void pte_free_finish(struct mmu_gather *tlb); static inline void tlb_flush(struct mmu_gather *tlb) { - struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch); + struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch); /* If there's a TLB batch pending, then we must flush it because the * pages are going to be freed and we really don't want to have a CPU * access a freed page because it has a stale TLB */ - if (tlbbatch->index) + if (tlbbatch->index) { __flush_tlb_pending(tlbbatch); + } - pte_free_finish(); + put_cpu_var(ppc64_tlb_batch); + pte_free_finish(tlb); } #else diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h index abbe341..3f67596 100644 --- a/arch/powerpc/include/asm/tlbflush.h +++ b/arch/powerpc/include/asm/tlbflush.h @@ -101,18 +101,25 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, static inline void arch_enter_lazy_mmu_mode(void) { - struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); + struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); batch->active = 1; + + put_cpu_var(ppc64_tlb_batch); } static inline void arch_leave_lazy_mmu_mode(void) { - struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); + struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); + + if (batch->active) { + if (batch->index) { + __flush_tlb_pending(batch); + } + batch->active = 0; + } - if (batch->index) - __flush_tlb_pending(batch); - batch->active = 0; + put_cpu_var(ppc64_tlb_batch); } #define arch_flush_lazy_mmu_mode() do {} while (0) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 43e0734..4bb9ce4 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -625,44 +625,52 @@ do_work: bne restore /* here we are preempting the current task */ 1: + /* + * preempt_schedule_irq() expects interrupts disabled and returns + * with interrupts disabled. No need to check preemption again, + * preempt_schedule_irq just did that for us. + */ + bl .preempt_schedule_irq #ifdef CONFIG_TRACE_IRQFLAGS bl .trace_hardirqs_on +#endif /* CONFIG_TRACE_IRQFLAGS */ + /* Note: we just clobbered r10 which used to contain the previous * MSR before the hard-disabling done by the caller of do_work. * We don't have that value anymore, but it doesn't matter as * we will hard-enable unconditionally, we can just reload the * current MSR into r10 */ + bl .preempt_schedule_irq mfmsr r10 -#endif /* CONFIG_TRACE_IRQFLAGS */ - li r0,1 - stb r0,PACASOFTIRQEN(r13) - stb r0,PACAHARDIRQEN(r13) - ori r10,r10,MSR_EE - mtmsrd r10,1 /* reenable interrupts */ - bl .preempt_schedule - mfmsr r10 - clrrdi r9,r1,THREAD_SHIFT - rldicl r10,r10,48,1 /* disable interrupts again */ - rotldi r10,r10,16 - mtmsrd r10,1 - ld r4,TI_FLAGS(r9) - andi. r0,r4,_TIF_NEED_RESCHED - bne 1b + clrrdi r9,r1,THREAD_SHIFT + rldicl r10,r10,48,1 /* disable interrupts again */ + rotldi r10,r10,16 + mtmsrd r10,1 + ld r4,TI_FLAGS(r9) + andi. 
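/*
 * Illustrative sketch, not part of the patch: the pattern behind the
 * __get_cpu_var() -> get_cpu_var()/put_cpu_var() conversions in the tlb.h
 * and tlbflush.h hunks above.  On PREEMPT_RT these paths can be preempted,
 * so per-CPU data must be touched inside an explicit preempt-disabled
 * window, and state that has to survive preemption (the PTE freelist batch)
 * moves into the mmu_gather itself via the new arch_mmu_gather.  Minimal
 * access pattern, shown with a hypothetical per-CPU counter:
 */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_touch(void)
{
	unsigned long *p = &get_cpu_var(example_counter);   /* disables preemption */

	(*p)++;
	put_cpu_var(example_counter);			     /* re-enables preemption */
}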
r0,r4,(_TIF_NEED_RESCHED) + bne 1b b restore user_work: #endif - /* Enable interrupts */ - ori r10,r10,MSR_EE - mtmsrd r10,1 - andi. r0,r4,_TIF_NEED_RESCHED beq 1f - bl .schedule + + /* preempt_schedule_irq() expects interrupts disabled. */ + bl .preempt_schedule_irq b .ret_from_except_lite -1: bl .save_nvgprs + /* here we are preempting the current task */ +1: li r0,1 + stb r0,PACASOFTIRQEN(r13) + stb r0,PACAHARDIRQEN(r13) + + /* Enable interrupts */ + ori r10,r10,MSR_EE + mtmsrd r10,1 + + bl .save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD bl .do_signal b .ret_from_except diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 88d9c1d..1a82d48 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -96,9 +96,11 @@ void cpu_idle(void) tick_nohz_restart_sched_tick(); if (cpu_should_die()) cpu_die(); - preempt_enable_no_resched(); - schedule(); + local_irq_disable(); + __preempt_enable_no_resched(); + __schedule(); preempt_disable(); + local_irq_enable(); } } diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index f7f376e..9f56521 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -191,7 +191,7 @@ int show_interrupts(struct seq_file *p, void *v) if (i < NR_IRQS) { desc = get_irq_desc(i); - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); action = desc->action; if (!action || !action->handler) goto skip; @@ -212,7 +212,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, ", %s", action->name); seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } else if (i == NR_IRQS) { #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) if (tau_initialized){ @@ -453,7 +453,7 @@ void do_softirq(void) */ static LIST_HEAD(irq_hosts); -static DEFINE_SPINLOCK(irq_big_lock); +static DEFINE_ATOMIC_SPINLOCK(irq_big_lock); static unsigned int revmap_trees_allocated; static DEFINE_MUTEX(revmap_trees_mutex); struct irq_map_entry irq_map[NR_IRQS]; @@ -499,14 +499,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, if (host->ops->match == NULL) host->ops->match = default_irq_host_match; - spin_lock_irqsave(&irq_big_lock, flags); + atomic_spin_lock_irqsave(&irq_big_lock, flags); /* If it's a legacy controller, check for duplicates and * mark it as allocated (we use irq 0 host pointer for that */ if (revmap_type == IRQ_HOST_MAP_LEGACY) { if (irq_map[0].host != NULL) { - spin_unlock_irqrestore(&irq_big_lock, flags); + atomic_spin_unlock_irqrestore(&irq_big_lock, flags); /* If we are early boot, we can't free the structure, * too bad... * this will be fixed once slab is made available early @@ -520,7 +520,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, } list_add(&host->link, &irq_hosts); - spin_unlock_irqrestore(&irq_big_lock, flags); + atomic_spin_unlock_irqrestore(&irq_big_lock, flags); /* Additional setups per revmap type */ switch(revmap_type) { @@ -571,13 +571,13 @@ struct irq_host *irq_find_host(struct device_node *node) * the absence of a device node. This isn't a problem so far * yet though... 
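/*
 * Illustrative sketch, not part of the patch: the idle-loop exit used by the
 * idle.c hunk above.  Dropping the idle preempt count and entering the
 * scheduler with interrupts still disabled appears intended to close the
 * window in which a wakeup could arrive between "enable preemption" and
 * "schedule()"; __preempt_enable_no_resched() and __schedule() are the
 * RT-tree variants the hunk relies on.  Rough shape of the loop tail:
 */
static void example_idle_reschedule(void)
{
	local_irq_disable();
	__preempt_enable_no_resched();	/* drop the idle task's preempt count */
	__schedule();			/* switch away, IRQs still off */
	preempt_disable();		/* back in idle: re-raise the count */
	local_irq_enable();
}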
*/ - spin_lock_irqsave(&irq_big_lock, flags); + atomic_spin_lock_irqsave(&irq_big_lock, flags); list_for_each_entry(h, &irq_hosts, link) if (h->ops->match(h, node)) { found = h; break; } - spin_unlock_irqrestore(&irq_big_lock, flags); + atomic_spin_unlock_irqrestore(&irq_big_lock, flags); return found; } EXPORT_SYMBOL_GPL(irq_find_host); @@ -935,7 +935,7 @@ unsigned int irq_alloc_virt(struct irq_host *host, if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) return NO_IRQ; - spin_lock_irqsave(&irq_big_lock, flags); + atomic_spin_lock_irqsave(&irq_big_lock, flags); /* Use hint for 1 interrupt if any */ if (count == 1 && hint >= NUM_ISA_INTERRUPTS && @@ -959,7 +959,7 @@ unsigned int irq_alloc_virt(struct irq_host *host, } } if (found == NO_IRQ) { - spin_unlock_irqrestore(&irq_big_lock, flags); + atomic_spin_unlock_irqrestore(&irq_big_lock, flags); return NO_IRQ; } hint_found: @@ -968,7 +968,7 @@ unsigned int irq_alloc_virt(struct irq_host *host, smp_wmb(); irq_map[i].host = host; } - spin_unlock_irqrestore(&irq_big_lock, flags); + atomic_spin_unlock_irqrestore(&irq_big_lock, flags); return found; } @@ -980,7 +980,7 @@ void irq_free_virt(unsigned int virq, unsigned int count) WARN_ON (virq < NUM_ISA_INTERRUPTS); WARN_ON (count == 0 || (virq + count) > irq_virq_count); - spin_lock_irqsave(&irq_big_lock, flags); + atomic_spin_lock_irqsave(&irq_big_lock, flags); for (i = virq; i < (virq + count); i++) { struct irq_host *host; @@ -993,7 +993,7 @@ void irq_free_virt(unsigned int virq, unsigned int count) smp_wmb(); irq_map[i].host = NULL; } - spin_unlock_irqrestore(&irq_big_lock, flags); + atomic_spin_unlock_irqrestore(&irq_big_lock, flags); } void irq_early_init(void) @@ -1065,7 +1065,7 @@ static int virq_debug_show(struct seq_file *m, void *private) for (i = 1; i < NR_IRQS; i++) { desc = get_irq_desc(i); - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); if (desc->action && desc->action->handler) { seq_printf(m, "%5d ", i); @@ -1084,7 +1084,7 @@ static int virq_debug_show(struct seq_file *m, void *private) seq_printf(m, "%s\n", p); } - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } return 0; diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index c932978..dcbf960 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -263,7 +263,7 @@ ss_probe: kcb->kprobe_status = KPROBE_HIT_SSDONE; reset_current_kprobe(); - preempt_enable_no_resched(); + preempt_enable(); return 1; } else if (ret < 0) { /* @@ -282,7 +282,7 @@ ss_probe: return 1; no_kprobe: - preempt_enable_no_resched(); + preempt_enable(); return ret; } @@ -412,7 +412,7 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs) } reset_current_kprobe(); out: - preempt_enable_no_resched(); + preempt_enable(); /* * if somebody else is singlestepping across a probe point, msr diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c index 0516e2d..e38729a 100644 --- a/arch/powerpc/kernel/pmc.c +++ b/arch/powerpc/kernel/pmc.c @@ -37,7 +37,7 @@ static void dummy_perf(struct pt_regs *regs) } -static DEFINE_SPINLOCK(pmc_owner_lock); +static DEFINE_ATOMIC_SPINLOCK(pmc_owner_lock); static void *pmc_owner_caller; /* mostly for debugging */ perf_irq_t perf_irq = dummy_perf; @@ -45,7 +45,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq) { int err = 0; - spin_lock(&pmc_owner_lock); + atomic_spin_lock(&pmc_owner_lock); if (pmc_owner_caller) { printk(KERN_WARNING "reserve_pmc_hardware: 
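/*
 * Illustrative note, not part of the patch: kprobes disables preemption when
 * a probe fires; releasing it with preempt_enable() instead of the
 * _no_resched variant (as the kprobes.c hunks above do) means a reschedule
 * request raised while the handler ran is acted on immediately, which
 * matters for latency on PREEMPT_RT.  Minimal shape of such an exit path:
 */
static int example_probe_exit(void)
{
	preempt_disable();	/* normally done by the kprobes entry path */
	/* ... handler / single-step bookkeeping ... */
	preempt_enable();	/* folds in the need_resched check */
	return 0;
}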
" @@ -59,21 +59,21 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq) perf_irq = new_perf_irq ? new_perf_irq : dummy_perf; out: - spin_unlock(&pmc_owner_lock); + atomic_spin_unlock(&pmc_owner_lock); return err; } EXPORT_SYMBOL_GPL(reserve_pmc_hardware); void release_pmc_hardware(void) { - spin_lock(&pmc_owner_lock); + atomic_spin_lock(&pmc_owner_lock); WARN_ON(! pmc_owner_caller); pmc_owner_caller = NULL; perf_irq = dummy_perf; - spin_unlock(&pmc_owner_lock); + atomic_spin_unlock(&pmc_owner_lock); } EXPORT_SYMBOL_GPL(release_pmc_hardware); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 892a9f2..ef9d506 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -305,6 +305,10 @@ struct task_struct *__switch_to(struct task_struct *prev, struct thread_struct *new_thread, *old_thread; unsigned long flags; struct task_struct *last; +#if defined(CONFIG_PPC64) && defined (CONFIG_PREEMPT_RT) + struct ppc64_tlb_batch *batch; + int hadbatch; +#endif #ifdef CONFIG_SMP /* avoid complexity of lazy save/restore of fpu @@ -396,6 +400,17 @@ struct task_struct *__switch_to(struct task_struct *prev, old_thread->accum_tb += (current_tb - start_tb); new_thread->start_tb = current_tb; } + +#ifdef CONFIG_PREEMPT_RT + batch = &__get_cpu_var(ppc64_tlb_batch); + if (batch->active) { + hadbatch = 1; + if (batch->index) { + __flush_tlb_pending(batch); + } + batch->active = 0; + } +#endif /* #ifdef CONFIG_PREEMPT_RT */ #endif local_irq_save(flags); @@ -414,6 +429,13 @@ struct task_struct *__switch_to(struct task_struct *prev, local_irq_restore(flags); +#if defined(CONFIG_PPC64) && defined(CONFIG_PREEMPT_RT) + if (hadbatch) { + batch = &__get_cpu_var(ppc64_tlb_batch); + batch->active = 1; + } +#endif + return last; } diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index d4405b9..de2295b 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -81,7 +81,7 @@ struct boot_param_header *initial_boot_params; extern struct device_node *allnodes; /* temporary while merging */ -extern rwlock_t devtree_lock; /* temporary while merging */ +extern atomic_spinlock_t devtree_lock; /* temporary while merging */ /* export that to outside world */ struct device_node *of_chosen; @@ -1275,12 +1275,12 @@ struct device_node *of_find_node_by_phandle(phandle handle) { struct device_node *np; - read_lock(&devtree_lock); + atomic_spin_lock(&devtree_lock); for (np = allnodes; np != 0; np = np->allnext) if (np->linux_phandle == handle) break; of_node_get(np); - read_unlock(&devtree_lock); + atomic_spin_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_phandle); @@ -1328,13 +1328,13 @@ struct device_node *of_find_all_nodes(struct device_node *prev) { struct device_node *np; - read_lock(&devtree_lock); + atomic_spin_lock(&devtree_lock); np = prev ? 
prev->allnext : allnodes; for (; np != 0; np = np->allnext) if (of_node_get(np)) break; of_node_put(prev); - read_unlock(&devtree_lock); + atomic_spin_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_all_nodes); @@ -1419,12 +1419,12 @@ void of_attach_node(struct device_node *np) { unsigned long flags; - write_lock_irqsave(&devtree_lock, flags); + atomic_spin_lock_irqsave(&devtree_lock, flags); np->sibling = np->parent->child; np->allnext = allnodes; np->parent->child = np; allnodes = np; - write_unlock_irqrestore(&devtree_lock, flags); + atomic_spin_unlock_irqrestore(&devtree_lock, flags); } /* @@ -1437,7 +1437,7 @@ void of_detach_node(struct device_node *np) struct device_node *parent; unsigned long flags; - write_lock_irqsave(&devtree_lock, flags); + atomic_spin_lock_irqsave(&devtree_lock, flags); parent = np->parent; if (!parent) @@ -1468,7 +1468,7 @@ void of_detach_node(struct device_node *np) of_node_set_flag(np, OF_DETACHED); out_unlock: - write_unlock_irqrestore(&devtree_lock, flags); + atomic_spin_unlock_irqrestore(&devtree_lock, flags); } #ifdef CONFIG_PPC_PSERIES @@ -1552,18 +1552,18 @@ int prom_add_property(struct device_node* np, struct property* prop) unsigned long flags; prop->next = NULL; - write_lock_irqsave(&devtree_lock, flags); + atomic_spin_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (strcmp(prop->name, (*next)->name) == 0) { /* duplicate ! don't insert it */ - write_unlock_irqrestore(&devtree_lock, flags); + atomic_spin_unlock_irqrestore(&devtree_lock, flags); return -1; } next = &(*next)->next; } *next = prop; - write_unlock_irqrestore(&devtree_lock, flags); + atomic_spin_unlock_irqrestore(&devtree_lock, flags); #ifdef CONFIG_PROC_DEVICETREE /* try to add to proc as well if it was initialized */ @@ -1586,7 +1586,7 @@ int prom_remove_property(struct device_node *np, struct property *prop) unsigned long flags; int found = 0; - write_lock_irqsave(&devtree_lock, flags); + atomic_spin_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (*next == prop) { @@ -1599,7 +1599,7 @@ int prom_remove_property(struct device_node *np, struct property *prop) } next = &(*next)->next; } - write_unlock_irqrestore(&devtree_lock, flags); + atomic_spin_unlock_irqrestore(&devtree_lock, flags); if (!found) return -ENODEV; @@ -1628,7 +1628,7 @@ int prom_update_property(struct device_node *np, unsigned long flags; int found = 0; - write_lock_irqsave(&devtree_lock, flags); + atomic_spin_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (*next == oldprop) { @@ -1642,7 +1642,7 @@ int prom_update_property(struct device_node *np, } next = &(*next)->next; } - write_unlock_irqrestore(&devtree_lock, flags); + atomic_spin_unlock_irqrestore(&devtree_lock, flags); if (!found) return -ENODEV; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index eae4511..15d4291 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -1000,7 +1000,7 @@ void __init time_init(void) /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ boot_tb = get_tb_or_rtc(); - write_seqlock_irqsave(&xtime_lock, flags); + write_atomic_seqlock_irqsave(&xtime_lock, flags); /* If platform provided a timezone (pmac), we correct the time */ if (timezone_offset) { @@ -1014,7 +1014,7 @@ void __init time_init(void) vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; vdso_data->tb_to_xs = tb_to_xs; - write_sequnlock_irqrestore(&xtime_lock, flags); + 
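/*
 * Illustrative sketch, not part of the patch: devtree_lock changes from an
 * rwlock_t to an atomic spinlock in the prom.c hunks above, so former
 * read_lock() sections now take the lock exclusively.  A device-tree walk
 * after the conversion looks like:
 */
static struct device_node *example_find_by_phandle(phandle handle)
{
	struct device_node *np;

	atomic_spin_lock(&devtree_lock);	/* readers and writers serialize */
	for (np = allnodes; np != NULL; np = np->allnext)
		if (np->linux_phandle == handle)
			break;
	of_node_get(np);
	atomic_spin_unlock(&devtree_lock);
	return np;
}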
write_atomic_sequnlock_irqrestore(&xtime_lock, flags); /* Register the clocksource, if we're not running on iSeries */ if (!firmware_has_feature(FW_FEATURE_ISERIES)) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 6f0ae1a..451a756 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -102,11 +102,11 @@ static inline void pmac_backlight_unblank(void) { } int die(const char *str, struct pt_regs *regs, long err) { static struct { - spinlock_t lock; + atomic_spinlock_t lock; u32 lock_owner; int lock_owner_depth; } die = { - .lock = __SPIN_LOCK_UNLOCKED(die.lock), + .lock = __ATOMIC_SPIN_LOCK_UNLOCKED(die.lock), .lock_owner = -1, .lock_owner_depth = 0 }; @@ -120,7 +120,7 @@ int die(const char *str, struct pt_regs *regs, long err) if (die.lock_owner != raw_smp_processor_id()) { console_verbose(); - spin_lock_irqsave(&die.lock, flags); + atomic_spin_lock_irqsave(&die.lock, flags); die.lock_owner = smp_processor_id(); die.lock_owner_depth = 0; bust_spinlocks(1); @@ -155,7 +155,7 @@ int die(const char *str, struct pt_regs *regs, long err) bust_spinlocks(0); die.lock_owner = -1; add_taint(TAINT_DIE); - spin_unlock_irqrestore(&die.lock, flags); + atomic_spin_unlock_irqrestore(&die.lock, flags); if (kexec_should_crash(current) || kexec_sr_activated(smp_processor_id())) @@ -193,6 +193,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) addr, regs->nip, regs->link, code); } +#ifdef CONFIG_PREEMPT_RT + local_irq_enable(); + preempt_check_resched(); +#endif + memset(&info, 0, sizeof(info)); info.si_signo = signr; info.si_code = code; diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index 79d0fa3..106d6a5 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -86,8 +86,10 @@ void __raw_spin_unlock_wait(raw_spinlock_t *lock) { while (lock->slock) { HMT_low(); + preempt_disable(); if (SHARED_PROCESSOR) __spin_yield(lock); + preempt_enable(); } HMT_medium(); } diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 830bef0..03c4343 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -159,7 +159,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, } #endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/ - if (in_atomic() || mm == NULL) { + if (in_atomic() || mm == NULL || current->pagefault_disabled) { if (!user_mode(regs)) return SIGSEGV; /* in_atomic() in user mode is really bad, diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 056d23a..a99d114 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -37,7 +37,7 @@ #define HPTE_LOCK_BIT 3 -static DEFINE_SPINLOCK(native_tlbie_lock); +static DEFINE_ATOMIC_SPINLOCK(native_tlbie_lock); static inline void __tlbie(unsigned long va, int psize, int ssize) { @@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local) if (use_local) use_local = mmu_psize_defs[psize].tlbiel; if (lock_tlbie && !use_local) - spin_lock(&native_tlbie_lock); + atomic_spin_lock(&native_tlbie_lock); asm volatile("ptesync": : :"memory"); if (use_local) { __tlbiel(va, psize, ssize); @@ -114,7 +114,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local) asm volatile("eieio; tlbsync; ptesync": : :"memory"); } if (lock_tlbie && !use_local) - spin_unlock(&native_tlbie_lock); + atomic_spin_unlock(&native_tlbie_lock); } static inline void native_lock_hpte(struct hash_pte *hptep) @@ -434,7 +434,7 @@ static void 
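/*
 * Illustrative sketch, not part of the patch: on PREEMPT_RT
 * pagefault_disable() no longer raises the preempt count, so in_atomic()
 * alone cannot tell do_page_fault() that faulting is forbidden; the fault.c
 * hunk above therefore also checks the per-task flag added elsewhere in this
 * series.  The combined test is essentially:
 */
#include <linux/sched.h>

static bool example_cannot_handle_fault(struct mm_struct *mm)
{
	/* true when the handler must not take mmap_sem or sleep */
	return in_atomic() || mm == NULL || current->pagefault_disabled;
}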
native_hpte_clear(void) /* we take the tlbie lock and hold it. Some hardware will * deadlock if we try to tlbie from two processors at once. */ - spin_lock(&native_tlbie_lock); + atomic_spin_lock(&native_tlbie_lock); slots = pteg_count * HPTES_PER_GROUP; @@ -458,7 +458,7 @@ static void native_hpte_clear(void) } asm volatile("eieio; tlbsync; ptesync":::"memory"); - spin_unlock(&native_tlbie_lock); + atomic_spin_unlock(&native_tlbie_lock); local_irq_restore(flags); } @@ -521,7 +521,7 @@ static void native_flush_hash_range(unsigned long number, int local) int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); if (lock_tlbie) - spin_lock(&native_tlbie_lock); + atomic_spin_lock(&native_tlbie_lock); asm volatile("ptesync":::"memory"); for (i = 0; i < number; i++) { @@ -536,7 +536,7 @@ static void native_flush_hash_range(unsigned long number, int local) asm volatile("eieio; tlbsync; ptesync":::"memory"); if (lock_tlbie) - spin_unlock(&native_tlbie_lock); + atomic_spin_unlock(&native_tlbie_lock); } local_irq_restore(flags); diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c index c2186c7..81310e2 100644 --- a/arch/powerpc/mm/highmem.c +++ b/arch/powerpc/mm/highmem.c @@ -35,6 +35,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) unsigned long vaddr; /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ + preempt_disable(); pagefault_disable(); if (!PageHighMem(page)) return page_address(page); @@ -73,5 +74,6 @@ void kunmap_atomic(void *kvaddr, enum km_type type) local_flush_tlb_page(NULL, vaddr); #endif pagefault_enable(); + preempt_enable(); } EXPORT_SYMBOL(kunmap_atomic); diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 9920d6a..c46ef2f 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -305,7 +305,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, pmd = pmd_offset(pud, start); pud_clear(pud); - pmd_free_tlb(tlb, pmd); + pmd_free_tlb(tlb, pmd, start); } static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, @@ -348,7 +348,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, pud = pud_offset(pgd, start); pgd_clear(pgd); - pud_free_tlb(tlb, pud); + pud_free_tlb(tlb, pud, start); } /* diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 3de6a0d..3ef5084 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -54,8 +54,6 @@ #endif #define MAX_LOW_MEM CONFIG_LOWMEM_SIZE -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); - phys_addr_t total_memory; phys_addr_t total_lowmem; diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index 92a1971..fb446e4 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -46,7 +46,7 @@ static unsigned int next_context, nr_free_contexts; static unsigned long *context_map; static unsigned long *stale_map[NR_CPUS]; static struct mm_struct **context_mm; -static DEFINE_SPINLOCK(context_lock); +static DEFINE_ATOMIC_SPINLOCK(context_lock); #define CTX_MAP_SIZE \ (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1)) @@ -104,9 +104,9 @@ static unsigned int steal_context_smp(unsigned int id) /* This will happen if you have more CPUs than available contexts, * all we can do here is wait a bit and try again */ - spin_unlock(&context_lock); + atomic_spin_unlock(&context_lock); cpu_relax(); - spin_lock(&context_lock); + atomic_spin_lock(&context_lock); /* This will cause the 
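/*
 * Illustrative sketch, not part of the patch: atomic kmap slots are per CPU,
 * and with PREEMPT_RT pagefault_disable() no longer keeps preemption off, so
 * the highmem.c hunk above brackets the mapping with an explicit
 * preempt_disable()/preempt_enable().  Callers are unchanged:
 */
#include <linux/highmem.h>

static void example_copy_from_highpage(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page, KM_USER0);	/* preemption now off here */

	memcpy(dst, src, len);
	kunmap_atomic(src, KM_USER0);			/* preemption enabled again */
}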
caller to try again */ return MMU_NO_CONTEXT; @@ -177,7 +177,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next) unsigned long *map; /* No lockless fast path .. yet */ - spin_lock(&context_lock); + atomic_spin_lock(&context_lock); #ifndef DEBUG_STEAL_ONLY pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n", @@ -257,7 +257,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next) /* Flick the MMU and release lock */ set_context(id, next->pgd); - spin_unlock(&context_lock); + atomic_spin_unlock(&context_lock); } /* @@ -284,7 +284,7 @@ void destroy_context(struct mm_struct *mm) WARN_ON(mm->context.active != 0); - spin_lock_irqsave(&context_lock, flags); + atomic_spin_lock_irqsave(&context_lock, flags); id = mm->context.id; if (id != MMU_NO_CONTEXT) { __clear_bit(id, context_map); @@ -295,7 +295,7 @@ void destroy_context(struct mm_struct *mm) context_mm[id] = NULL; nr_free_contexts++; } - spin_unlock_irqrestore(&context_lock, flags); + atomic_spin_unlock_irqrestore(&context_lock, flags); } #ifdef CONFIG_SMP diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 627767d..c8a2904 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -30,7 +30,6 @@ #include #include -static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur); static unsigned long pte_freelist_forced_free; struct pte_freelist_batch @@ -81,11 +80,11 @@ static void pte_free_submit(struct pte_freelist_batch *batch) void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf) { - /* This is safe since tlb_gather_mmu has disabled preemption */ - struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur); + struct pte_freelist_batch **batchp; - if (atomic_read(&tlb->mm->mm_users) < 2 || - cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){ + batchp = &tlb->arch.batch; + + if (atomic_read(&tlb->mm->mm_users) < 2) { pgtable_free(pgf); return; } @@ -105,15 +104,16 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf) } } -void pte_free_finish(void) +void pte_free_finish(struct mmu_gather *tlb) { - /* This is safe since tlb_gather_mmu has disabled preemption */ - struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur); + struct pte_freelist_batch **batchp; - if (*batchp == NULL) - return; - pte_free_submit(*batchp); - *batchp = NULL; + batchp = &tlb->arch.batch; + + if (*batchp) { + pte_free_submit(*batchp); + *batchp = NULL; + } } /* diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 937eb90..33c458f 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -30,14 +30,10 @@ #include #include #include +#include DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); -/* This is declared as we are using the more or less generic - * arch/powerpc/include/asm/tlb.h file -- tgall - */ -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); - /* * A linux PTE was changed and the corresponding hash table entry * neesd to be flushed. 
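/*
 * Illustrative sketch, not part of the patch: the PTE freelist batch moves
 * from a per-CPU variable into the mmu_gather (tlb->arch.batch) in the
 * pgtable.c hunks above, so deferred page-table frees no longer depend on
 * preemption staying disabled for the whole gather -- the same reason
 * __switch_to() drains an active ppc64_tlb_batch before a task can migrate.
 * The final drain then happens per gather, mirroring pte_free_finish():
 */
static void example_drain_batch(struct mmu_gather *tlb)
{
	struct pte_freelist_batch **batchp = &tlb->arch.batch;

	if (*batchp) {
		pte_free_submit(*batchp);	/* frees are deferred via RCU */
		*batchp = NULL;
	}
}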
This function will either perform the flush @@ -49,7 +45,7 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); void hpte_need_flush(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long pte, int huge) { - struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); + struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); unsigned long vsid, vaddr; unsigned int psize; int ssize; @@ -100,6 +96,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, */ if (!batch->active) { flush_hash_page(vaddr, rpte, psize, ssize, 0); + put_cpu_var(ppc64_tlb_batch); return; } @@ -126,8 +123,22 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, batch->pte[i] = rpte; batch->vaddr[i] = vaddr; batch->index = ++i; + +#ifdef CONFIG_PREEMPT_RT + /* + * Since flushing tlb needs expensive hypervisor call(s) on celleb, + * always flush it on RT to reduce scheduling latency. + */ + if (machine_is(celleb)) { + __flush_tlb_pending(batch); + put_cpu_var(ppc64_tlb_batch); + return; + } +#endif /* CONFIG_PREEMPT_RT */ + if (i >= PPC64_TLB_BATCH_NR) __flush_tlb_pending(batch); + put_cpu_var(ppc64_tlb_batch); } /* diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index ad2eb4d..f627eb6 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -85,7 +85,7 @@ EXPORT_SYMBOL(local_flush_tlb_page); */ #ifdef CONFIG_SMP -static DEFINE_SPINLOCK(tlbivax_lock); +static DEFINE_ATOMIC_SPINLOCK(tlbivax_lock); struct tlb_flush_param { unsigned long addr; @@ -158,10 +158,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) { int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL); if (lock) - spin_lock(&tlbivax_lock); + atomic_spin_lock(&tlbivax_lock); _tlbivax_bcast(vmaddr, pid); if (lock) - spin_unlock(&tlbivax_lock); + atomic_spin_unlock(&tlbivax_lock); goto bail; } else { struct tlb_flush_param p = { .pid = pid, .addr = vmaddr }; @@ -189,7 +189,9 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) _tlbil_pid(0); preempt_enable(); #else + preempt_disable(); _tlbil_pid(0); + preempt_enable(); #endif } EXPORT_SYMBOL(flush_tlb_kernel_range); diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 68e4f16..afffc6d 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) u32 status, enable; /* Mask off the cascaded IRQ */ - spin_lock(&desc->lock); + atomic_spin_lock(&desc->lock); desc->chip->mask(virq); - spin_unlock(&desc->lock); + atomic_spin_unlock(&desc->lock); /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs * are pending. 'ffs()' is 1 based */ @@ -104,11 +104,11 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) } /* Processing done; can reenable the cascade now */ - spin_lock(&desc->lock); + atomic_spin_lock(&desc->lock); desc->chip->ack(virq); if (!(desc->status & IRQ_DISABLED)) desc->chip->unmask(virq); - spin_unlock(&desc->lock); + atomic_spin_unlock(&desc->lock); } static int media5200_irq_map(struct irq_host *h, unsigned int virq, diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c index 35b1ec4..e1afbdc 100644 --- a/arch/powerpc/platforms/cell/beat_htab.c +++ b/arch/powerpc/platforms/cell/beat_htab.c @@ -40,7 +40,7 @@ #define DBG_LOW(fmt...) 
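/*
 * Illustrative sketch, not part of the patch: once hpte_need_flush() takes
 * the batch with get_cpu_var(), every exit path has to release it with
 * put_cpu_var() -- the early "batch inactive" return, the RT-only immediate
 * flush for celleb, and the normal queueing path.  The balanced shape is
 * roughly:
 */
static void example_queue_or_flush(void)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

	if (!batch->active) {
		/* ... flush this single entry directly ... */
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/* ... queue the entry into batch->pte[] / batch->vaddr[] ... */
	if (batch->index >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}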
do { } while (0) #endif -static DEFINE_SPINLOCK(beat_htab_lock); +static DEFINE_ATOMIC_SPINLOCK(beat_htab_lock); static inline unsigned int beat_read_mask(unsigned hpte_group) { @@ -114,18 +114,18 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group, if (rflags & _PAGE_NO_CACHE) hpte_r &= ~_PAGE_COHERENT; - spin_lock(&beat_htab_lock); + atomic_spin_lock(&beat_htab_lock); lpar_rc = beat_read_mask(hpte_group); if (lpar_rc == 0) { if (!(vflags & HPTE_V_BOLTED)) DBG_LOW(" full\n"); - spin_unlock(&beat_htab_lock); + atomic_spin_unlock(&beat_htab_lock); return -1; } lpar_rc = beat_insert_htab_entry(0, hpte_group, lpar_rc << 48, hpte_v, hpte_r, &slot); - spin_unlock(&beat_htab_lock); + atomic_spin_unlock(&beat_htab_lock); /* * Since we try and ioremap PHBs we don't own, the pte insert @@ -198,17 +198,17 @@ static long beat_lpar_hpte_updatepp(unsigned long slot, "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ", want_v & HPTE_V_AVPN, slot, psize, newpp); - spin_lock(&beat_htab_lock); + atomic_spin_lock(&beat_htab_lock); dummy0 = beat_lpar_hpte_getword0(slot); if ((dummy0 & ~0x7FUL) != (want_v & ~0x7FUL)) { DBG_LOW("not found !\n"); - spin_unlock(&beat_htab_lock); + atomic_spin_unlock(&beat_htab_lock); return -1; } lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, &dummy0, &dummy1); - spin_unlock(&beat_htab_lock); + atomic_spin_unlock(&beat_htab_lock); if (lpar_rc != 0 || dummy0 == 0) { DBG_LOW("not found !\n"); return -1; @@ -262,13 +262,13 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp, vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M); va = (vsid << 28) | (ea & 0x0fffffff); - spin_lock(&beat_htab_lock); + atomic_spin_lock(&beat_htab_lock); slot = beat_lpar_hpte_find(va, psize); BUG_ON(slot == -1); lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, &dummy0, &dummy1); - spin_unlock(&beat_htab_lock); + atomic_spin_unlock(&beat_htab_lock); BUG_ON(lpar_rc != 0); } @@ -285,18 +285,18 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va, slot, va, psize, local); want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); - spin_lock_irqsave(&beat_htab_lock, flags); + atomic_spin_lock_irqsave(&beat_htab_lock, flags); dummy1 = beat_lpar_hpte_getword0(slot); if ((dummy1 & ~0x7FUL) != (want_v & ~0x7FUL)) { DBG_LOW("not found !\n"); - spin_unlock_irqrestore(&beat_htab_lock, flags); + atomic_spin_unlock_irqrestore(&beat_htab_lock, flags); return; } lpar_rc = beat_write_htab_entry(0, slot, 0, 0, HPTE_V_VALID, 0, &dummy1, &dummy2); - spin_unlock_irqrestore(&beat_htab_lock, flags); + atomic_spin_unlock_irqrestore(&beat_htab_lock, flags); BUG_ON(lpar_rc != 0); } diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c index 7225484..e99c998 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/arch/powerpc/platforms/cell/beat_interrupt.c @@ -30,7 +30,7 @@ #include "beat_wrapper.h" #define MAX_IRQS NR_IRQS -static DEFINE_SPINLOCK(beatic_irq_mask_lock); +static DEFINE_ATOMIC_SPINLOCK(beatic_irq_mask_lock); static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64]; static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64]; @@ -65,30 +65,30 @@ static void beatic_mask_irq(unsigned int irq_plug) { unsigned long flags; - spin_lock_irqsave(&beatic_irq_mask_lock, flags); + atomic_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_enable[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64))); beatic_update_irq_mask(irq_plug); - spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); + 
atomic_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_unmask_irq(unsigned int irq_plug) { unsigned long flags; - spin_lock_irqsave(&beatic_irq_mask_lock, flags); + atomic_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_enable[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); beatic_update_irq_mask(irq_plug); - spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); + atomic_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_ack_irq(unsigned int irq_plug) { unsigned long flags; - spin_lock_irqsave(&beatic_irq_mask_lock, flags); + atomic_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_ack[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64))); beatic_update_irq_mask(irq_plug); - spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); + atomic_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_end_irq(unsigned int irq_plug) @@ -103,10 +103,10 @@ static void beatic_end_irq(unsigned int irq_plug) printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug); } - spin_lock_irqsave(&beatic_irq_mask_lock, flags); + atomic_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_ack[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); beatic_update_irq_mask(irq_plug); - spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); + atomic_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static struct irq_chip beatic_pic = { diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 882e470..0d3022e 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -237,7 +237,7 @@ extern int noirqdebug; static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) { - spin_lock(&desc->lock); + atomic_spin_lock(&desc->lock); desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); @@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) goto out_eoi; desc->status &= ~IRQ_PENDING; - spin_unlock(&desc->lock); + atomic_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + atomic_spin_lock(&desc->lock); } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); desc->status &= ~IRQ_INPROGRESS; out_eoi: desc->chip->eoi(irq); - spin_unlock(&desc->lock); + atomic_spin_unlock(&desc->lock); } static int iic_host_map(struct irq_host *h, unsigned int virq, diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c index 054dfe5..8f1d8cd 100644 --- a/arch/powerpc/platforms/chrp/time.c +++ b/arch/powerpc/platforms/chrp/time.c @@ -83,7 +83,12 @@ int chrp_set_rtc_time(struct rtc_time *tmarg) unsigned char save_control, save_freq_select; struct rtc_time tm = *tmarg; +#if CONFIG_PREEMPT_RT + if (!spin_trylock(&rtc_lock)) + return -1; +#else spin_lock(&rtc_lock); +#endif save_control = chrp_cmos_clock_read(RTC_CONTROL); /* tell the clock it's being set */ diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index 94f4447..af7cce0 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c @@ -217,9 +217,9 @@ void __init iSeries_activate_IRQs() struct irq_desc *desc = get_irq_desc(irq); if (desc && desc->chip && desc->chip->startup) { - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); desc->chip->startup(irq); - spin_unlock_irqrestore(&desc->lock, flags); + 
atomic_spin_unlock_irqrestore(&desc->lock, flags); } } } diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index e6c0040..276c335 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c @@ -59,10 +59,10 @@ extern struct device_node *k2_skiplist[2]; * We use a single global lock to protect accesses. Each driver has * to take care of its own locking */ -DEFINE_SPINLOCK(feature_lock); +DEFINE_ATOMIC_SPINLOCK(feature_lock); -#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags); -#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags); +#define LOCK(flags) atomic_spin_lock_irqsave(&feature_lock, flags); +#define UNLOCK(flags) atomic_spin_unlock_irqrestore(&feature_lock, flags); /* diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index c6f0f9e..7d57748 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c @@ -80,7 +80,7 @@ static int is_core_99; static int core99_bank = 0; static int nvram_partitions[3]; // XXX Turn that into a sem -static DEFINE_SPINLOCK(nv_lock); +static DEFINE_ATOMIC_SPINLOCK(nv_lock); static int (*core99_write_bank)(int bank, u8* datas); static int (*core99_erase_bank)(int bank); @@ -165,10 +165,10 @@ static unsigned char indirect_nvram_read_byte(int addr) unsigned char val; unsigned long flags; - spin_lock_irqsave(&nv_lock, flags); + atomic_spin_lock_irqsave(&nv_lock, flags); out_8(nvram_addr, addr >> 5); val = in_8(&nvram_data[(addr & 0x1f) << 4]); - spin_unlock_irqrestore(&nv_lock, flags); + atomic_spin_unlock_irqrestore(&nv_lock, flags); return val; } @@ -177,10 +177,10 @@ static void indirect_nvram_write_byte(int addr, unsigned char val) { unsigned long flags; - spin_lock_irqsave(&nv_lock, flags); + atomic_spin_lock_irqsave(&nv_lock, flags); out_8(nvram_addr, addr >> 5); out_8(&nvram_data[(addr & 0x1f) << 4], val); - spin_unlock_irqrestore(&nv_lock, flags); + atomic_spin_unlock_irqrestore(&nv_lock, flags); } @@ -481,7 +481,7 @@ static void core99_nvram_sync(void) if (!is_core_99 || !nvram_data || !nvram_image) return; - spin_lock_irqsave(&nv_lock, flags); + atomic_spin_lock_irqsave(&nv_lock, flags); if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE, NVRAM_SIZE)) goto bail; @@ -503,7 +503,7 @@ static void core99_nvram_sync(void) if (core99_write_bank(core99_bank, nvram_image)) printk("nvram: Error writing bank %d\n", core99_bank); bail: - spin_unlock_irqrestore(&nv_lock, flags); + atomic_spin_unlock_irqrestore(&nv_lock, flags); #ifdef DEBUG mdelay(2000); diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index db20de5..3ab56ec 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c @@ -50,13 +50,13 @@ static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask) value = ~value; /* Toggle the GPIO */ - spin_lock_irqsave(&feature_lock, flags); + atomic_spin_lock_irqsave(&feature_lock, flags); tmp = readb(addr); tmp = (tmp & ~mask) | (value & mask); DBG("Do write 0x%02x to GPIO %s (%p)\n", tmp, func->node->full_name, addr); writeb(tmp, addr); - spin_unlock_irqrestore(&feature_lock, flags); + atomic_spin_unlock_irqrestore(&feature_lock, flags); return 0; } @@ -145,9 +145,9 @@ static int macio_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask) struct macio_chip *macio = func->driver_data; unsigned long flags; - 
spin_lock_irqsave(&feature_lock, flags); + atomic_spin_lock_irqsave(&feature_lock, flags); MACIO_OUT32(offset, (MACIO_IN32(offset) & ~mask) | (value & mask)); - spin_unlock_irqrestore(&feature_lock, flags); + atomic_spin_unlock_irqrestore(&feature_lock, flags); return 0; } @@ -168,9 +168,9 @@ static int macio_do_write_reg8(PMF_STD_ARGS, u32 offset, u8 value, u8 mask) struct macio_chip *macio = func->driver_data; unsigned long flags; - spin_lock_irqsave(&feature_lock, flags); + atomic_spin_lock_irqsave(&feature_lock, flags); MACIO_OUT8(offset, (MACIO_IN8(offset) & ~mask) | (value & mask)); - spin_unlock_irqrestore(&feature_lock, flags); + atomic_spin_unlock_irqrestore(&feature_lock, flags); return 0; } @@ -223,12 +223,12 @@ static int macio_do_write_reg32_slm(PMF_STD_ARGS, u32 offset, u32 shift, if (args == NULL || args->count == 0) return -EINVAL; - spin_lock_irqsave(&feature_lock, flags); + atomic_spin_lock_irqsave(&feature_lock, flags); tmp = MACIO_IN32(offset); val = args->u[0].v << shift; tmp = (tmp & ~mask) | (val & mask); MACIO_OUT32(offset, tmp); - spin_unlock_irqrestore(&feature_lock, flags); + atomic_spin_unlock_irqrestore(&feature_lock, flags); return 0; } @@ -243,12 +243,12 @@ static int macio_do_write_reg8_slm(PMF_STD_ARGS, u32 offset, u32 shift, if (args == NULL || args->count == 0) return -EINVAL; - spin_lock_irqsave(&feature_lock, flags); + atomic_spin_lock_irqsave(&feature_lock, flags); tmp = MACIO_IN8(offset); val = args->u[0].v << shift; tmp = (tmp & ~mask) | (val & mask); MACIO_OUT8(offset, tmp); - spin_unlock_irqrestore(&feature_lock, flags); + atomic_spin_unlock_irqrestore(&feature_lock, flags); return 0; } @@ -278,12 +278,12 @@ static int unin_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask) { unsigned long flags; - spin_lock_irqsave(&feature_lock, flags); + atomic_spin_lock_irqsave(&feature_lock, flags); /* This is fairly bogus in darwin, but it should work for our needs * implemeted that way: */ UN_OUT(offset, (UN_IN(offset) & ~mask) | (value & mask)); - spin_unlock_irqrestore(&feature_lock, flags); + atomic_spin_unlock_irqrestore(&feature_lock, flags); return 0; } diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index d212006..9814414 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -57,7 +57,7 @@ static int max_irqs; static int max_real_irqs; static u32 level_mask[4]; -static DEFINE_SPINLOCK(pmac_pic_lock); +static DEFINE_ATOMIC_SPINLOCK(pmac_pic_lock); #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; @@ -85,7 +85,7 @@ static void pmac_mask_and_ack_irq(unsigned int virq) int i = src >> 5; unsigned long flags; - spin_lock_irqsave(&pmac_pic_lock, flags); + atomic_spin_lock_irqsave(&pmac_pic_lock, flags); __clear_bit(src, ppc_cached_irq_mask); if (__test_and_clear_bit(src, ppc_lost_interrupts)) atomic_dec(&ppc_n_lost_interrupts); @@ -97,7 +97,7 @@ static void pmac_mask_and_ack_irq(unsigned int virq) mb(); } while((in_le32(&pmac_irq_hw[i]->enable) & bit) != (ppc_cached_irq_mask[i] & bit)); - spin_unlock_irqrestore(&pmac_pic_lock, flags); + atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags); } static void pmac_ack_irq(unsigned int virq) @@ -107,12 +107,12 @@ static void pmac_ack_irq(unsigned int virq) int i = src >> 5; unsigned long flags; - spin_lock_irqsave(&pmac_pic_lock, flags); + atomic_spin_lock_irqsave(&pmac_pic_lock, flags); if (__test_and_clear_bit(src, ppc_lost_interrupts)) 
atomic_dec(&ppc_n_lost_interrupts); out_le32(&pmac_irq_hw[i]->ack, bit); (void)in_le32(&pmac_irq_hw[i]->ack); - spin_unlock_irqrestore(&pmac_pic_lock, flags); + atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags); } static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) @@ -152,12 +152,12 @@ static unsigned int pmac_startup_irq(unsigned int virq) unsigned long bit = 1UL << (src & 0x1f); int i = src >> 5; - spin_lock_irqsave(&pmac_pic_lock, flags); + atomic_spin_lock_irqsave(&pmac_pic_lock, flags); if ((irq_desc[virq].status & IRQ_LEVEL) == 0) out_le32(&pmac_irq_hw[i]->ack, bit); __set_bit(src, ppc_cached_irq_mask); __pmac_set_irq_mask(src, 0); - spin_unlock_irqrestore(&pmac_pic_lock, flags); + atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags); return 0; } @@ -167,10 +167,10 @@ static void pmac_mask_irq(unsigned int virq) unsigned long flags; unsigned int src = irq_map[virq].hwirq; - spin_lock_irqsave(&pmac_pic_lock, flags); + atomic_spin_lock_irqsave(&pmac_pic_lock, flags); __clear_bit(src, ppc_cached_irq_mask); __pmac_set_irq_mask(src, 1); - spin_unlock_irqrestore(&pmac_pic_lock, flags); + atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags); } static void pmac_unmask_irq(unsigned int virq) @@ -178,19 +178,19 @@ static void pmac_unmask_irq(unsigned int virq) unsigned long flags; unsigned int src = irq_map[virq].hwirq; - spin_lock_irqsave(&pmac_pic_lock, flags); + atomic_spin_lock_irqsave(&pmac_pic_lock, flags); __set_bit(src, ppc_cached_irq_mask); __pmac_set_irq_mask(src, 0); - spin_unlock_irqrestore(&pmac_pic_lock, flags); + atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags); } static int pmac_retrigger(unsigned int virq) { unsigned long flags; - spin_lock_irqsave(&pmac_pic_lock, flags); + atomic_spin_lock_irqsave(&pmac_pic_lock, flags); __pmac_retrigger(irq_map[virq].hwirq); - spin_unlock_irqrestore(&pmac_pic_lock, flags); + atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags); return 1; } @@ -210,7 +210,7 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id) int irq, bits; int rc = IRQ_NONE; - spin_lock_irqsave(&pmac_pic_lock, flags); + atomic_spin_lock_irqsave(&pmac_pic_lock, flags); for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) { int i = irq >> 5; bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i]; @@ -220,12 +220,12 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id) if (bits == 0) continue; irq += __ilog2(bits); - spin_unlock_irqrestore(&pmac_pic_lock, flags); + atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags); generic_handle_irq(irq); - spin_lock_irqsave(&pmac_pic_lock, flags); + atomic_spin_lock_irqsave(&pmac_pic_lock, flags); rc = IRQ_HANDLED; } - spin_unlock_irqrestore(&pmac_pic_lock, flags); + atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags); return rc; } @@ -244,7 +244,7 @@ static unsigned int pmac_pic_get_irq(void) return NO_IRQ_IGNORE; /* ignore, already handled */ } #endif /* CONFIG_SMP */ - spin_lock_irqsave(&pmac_pic_lock, flags); + atomic_spin_lock_irqsave(&pmac_pic_lock, flags); for (irq = max_real_irqs; (irq -= 32) >= 0; ) { int i = irq >> 5; bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i]; @@ -256,7 +256,7 @@ static unsigned int pmac_pic_get_irq(void) irq += __ilog2(bits); break; } - spin_unlock_irqrestore(&pmac_pic_lock, flags); + atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags); if (unlikely(irq < 0)) return NO_IRQ; return irq_linear_revmap(pmac_pic_host, irq); diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 989d646..2db0689 100644 --- 
a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -100,7 +100,7 @@ int eeh_subsystem_enabled; EXPORT_SYMBOL(eeh_subsystem_enabled); /* Lock to avoid races due to multiple reports of an error */ -static DEFINE_SPINLOCK(confirm_error_lock); +static DEFINE_ATOMIC_SPINLOCK(confirm_error_lock); /* Buffer for reporting slot-error-detail rtas calls. Its here * in BSS, and not dynamically alloced, so that it ends up in @@ -436,7 +436,7 @@ static void __eeh_clear_slot(struct device_node *parent, int mode_flag) void eeh_clear_slot (struct device_node *dn, int mode_flag) { unsigned long flags; - spin_lock_irqsave(&confirm_error_lock, flags); + atomic_spin_lock_irqsave(&confirm_error_lock, flags); dn = find_device_pe (dn); @@ -447,7 +447,7 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag) PCI_DN(dn)->eeh_mode &= ~mode_flag; PCI_DN(dn)->eeh_check_count = 0; __eeh_clear_slot(dn, mode_flag); - spin_unlock_irqrestore(&confirm_error_lock, flags); + atomic_spin_unlock_irqrestore(&confirm_error_lock, flags); } /** @@ -506,7 +506,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) * in one slot might report errors simultaneously, and we * only want one error recovery routine running. */ - spin_lock_irqsave(&confirm_error_lock, flags); + atomic_spin_lock_irqsave(&confirm_error_lock, flags); rc = 1; if (pdn->eeh_mode & EEH_MODE_ISOLATED) { pdn->eeh_check_count ++; @@ -575,7 +575,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) * with other functions on this device, and functions under * bridges. */ eeh_mark_slot (dn, EEH_MODE_ISOLATED); - spin_unlock_irqrestore(&confirm_error_lock, flags); + atomic_spin_unlock_irqrestore(&confirm_error_lock, flags); eeh_send_failure_event (dn, dev); @@ -586,7 +586,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) return 1; dn_unlock: - spin_unlock_irqrestore(&confirm_error_lock, flags); + atomic_spin_unlock_irqrestore(&confirm_error_lock, flags); return rc; } @@ -1056,7 +1056,7 @@ void __init eeh_init(void) struct device_node *phb, *np; struct eeh_early_enable_info info; - spin_lock_init(&confirm_error_lock); + atomic_spin_lock_init(&confirm_error_lock); spin_lock_init(&slot_errbuf_lock); np = of_find_node_by_path("/rtas"); diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c index 0e8db67..82d4513 100644 --- a/arch/powerpc/platforms/pseries/eeh_driver.c +++ b/arch/powerpc/platforms/pseries/eeh_driver.c @@ -70,12 +70,12 @@ static int irq_in_use(unsigned int irq) { int rc = 0; unsigned long flags; - struct irq_desc *desc = irq_desc + irq; + struct irq_desc *desc = irq_desc + irq; - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); if (desc->action) rc = 1; - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); return rc; } diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 661c8e0..bcdce0a 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -140,7 +140,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, return ret; } -static DEFINE_PER_CPU(u64 *, tce_page) = NULL; +static DEFINE_PER_CPU_LOCKED(u64 *, tce_page) = NULL; static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, @@ -154,13 +154,14 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long 
tcenum, long l, limit; long tcenum_start = tcenum, npages_start = npages; int ret = 0; + int cpu; if (npages == 1) { return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, direction, attrs); } - tcep = __get_cpu_var(tce_page); + tcep = get_cpu_var_locked(tce_page, &cpu); /* This is safe to do since interrupts are off when we're called * from iommu_alloc{,_sg}() @@ -169,10 +170,11 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, tcep = (u64 *)__get_free_page(GFP_ATOMIC); /* If allocation fails, fall back to the loop implementation */ if (!tcep) { + put_cpu_var_locked(tce_page, cpu); return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, direction, attrs); } - __get_cpu_var(tce_page) = tcep; + per_cpu_var_locked(tce_page, cpu) = tcep; } rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; @@ -216,6 +218,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, printk("\ttce[0] val = 0x%llx\n", tcep[0]); show_stack(current, (unsigned long *)__get_SP()); } + put_cpu_var_locked(tce_page, cpu); return ret; } diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c index b3cbac8..493d8de 100644 --- a/arch/powerpc/platforms/pseries/rtasd.c +++ b/arch/powerpc/platforms/pseries/rtasd.c @@ -208,7 +208,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal) break; case ERR_TYPE_KERNEL_PANIC: default: - WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */ + WARN_ON_ONCE_NONRT(!irqs_disabled()); /* @@@ DEBUG @@@ */ spin_unlock_irqrestore(&rtasd_log_lock, s); return; } @@ -228,7 +228,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal) /* Check to see if we need to or have stopped logging */ if (fatal || !logging_enabled) { logging_enabled = 0; - WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */ + WARN_ON_ONCE_NONRT(!irqs_disabled()); /* @@@ DEBUG @@@ */ spin_unlock_irqrestore(&rtasd_log_lock, s); return; } @@ -251,13 +251,13 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal) else rtas_log_start += 1; - WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */ + WARN_ON_ONCE_NONRT(!irqs_disabled()); /* @@@ DEBUG @@@ */ spin_unlock_irqrestore(&rtasd_log_lock, s); wake_up_interruptible(&rtas_log_wait); break; case ERR_TYPE_KERNEL_PANIC: default: - WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */ + WARN_ON_ONCE_NONRT(!irqs_disabled()); /* @@@ DEBUG @@@ */ spin_unlock_irqrestore(&rtasd_log_lock, s); return; } diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 419f8a6..d4fbbdc 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -851,7 +851,7 @@ void xics_migrate_irqs_away(void) || desc->chip->set_affinity == NULL) continue; - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); if (status) { @@ -875,7 +875,7 @@ void xics_migrate_irqs_away(void) cpumask_setall(irq_desc[virq].affinity); desc->chip->set_affinity(virq, cpu_all_mask); unlock: - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); } } #endif diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index da38a1f..d5702c4 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) u32 intr_index; u32 have_shift = 0; - spin_lock(&desc->lock); + atomic_spin_lock(&desc->lock); if 
((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { if (desc->chip->mask_ack) desc->chip->mask_ack(irq); @@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) break; } unlock: - spin_unlock(&desc->lock); + atomic_spin_unlock(&desc->lock); } static int __devinit fsl_of_msi_probe(struct of_device *dev, diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index a96584a..60c4e42 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c @@ -23,7 +23,7 @@ static unsigned char cached_8259[2] = { 0xff, 0xff }; #define cached_A1 (cached_8259[0]) #define cached_21 (cached_8259[1]) -static DEFINE_SPINLOCK(i8259_lock); +static DEFINE_ATOMIC_SPINLOCK(i8259_lock); static struct irq_host *i8259_host; @@ -42,7 +42,7 @@ unsigned int i8259_irq(void) if (pci_intack) irq = readb(pci_intack); else { - spin_lock(&i8259_lock); + atomic_spin_lock(&i8259_lock); lock = 1; /* Perform an interrupt acknowledge cycle on controller 1. */ @@ -74,7 +74,7 @@ unsigned int i8259_irq(void) irq = NO_IRQ; if (lock) - spin_unlock(&i8259_lock); + atomic_spin_unlock(&i8259_lock); return irq; } @@ -82,7 +82,7 @@ static void i8259_mask_and_ack_irq(unsigned int irq_nr) { unsigned long flags; - spin_lock_irqsave(&i8259_lock, flags); + atomic_spin_lock_irqsave(&i8259_lock, flags); if (irq_nr > 7) { cached_A1 |= 1 << (irq_nr-8); inb(0xA1); /* DUMMY */ @@ -95,7 +95,7 @@ static void i8259_mask_and_ack_irq(unsigned int irq_nr) outb(cached_21, 0x21); outb(0x20, 0x20); /* Non-specific EOI */ } - spin_unlock_irqrestore(&i8259_lock, flags); + atomic_spin_unlock_irqrestore(&i8259_lock, flags); } static void i8259_set_irq_mask(int irq_nr) @@ -110,13 +110,13 @@ static void i8259_mask_irq(unsigned int irq_nr) pr_debug("i8259_mask_irq(%d)\n", irq_nr); - spin_lock_irqsave(&i8259_lock, flags); + atomic_spin_lock_irqsave(&i8259_lock, flags); if (irq_nr < 8) cached_21 |= 1 << irq_nr; else cached_A1 |= 1 << (irq_nr-8); i8259_set_irq_mask(irq_nr); - spin_unlock_irqrestore(&i8259_lock, flags); + atomic_spin_unlock_irqrestore(&i8259_lock, flags); } static void i8259_unmask_irq(unsigned int irq_nr) @@ -125,13 +125,13 @@ static void i8259_unmask_irq(unsigned int irq_nr) pr_debug("i8259_unmask_irq(%d)\n", irq_nr); - spin_lock_irqsave(&i8259_lock, flags); + atomic_spin_lock_irqsave(&i8259_lock, flags); if (irq_nr < 8) cached_21 &= ~(1 << irq_nr); else cached_A1 &= ~(1 << (irq_nr-8)); i8259_set_irq_mask(irq_nr); - spin_unlock_irqrestore(&i8259_lock, flags); + atomic_spin_unlock_irqrestore(&i8259_lock, flags); } static struct irq_chip i8259_pic = { @@ -241,7 +241,7 @@ void i8259_init(struct device_node *node, unsigned long intack_addr) unsigned long flags; /* initialize the controller */ - spin_lock_irqsave(&i8259_lock, flags); + atomic_spin_lock_irqsave(&i8259_lock, flags); /* Mask all first */ outb(0xff, 0xA1); @@ -273,7 +273,7 @@ void i8259_init(struct device_node *node, unsigned long intack_addr) outb(cached_A1, 0xA1); outb(cached_21, 0x21); - spin_unlock_irqrestore(&i8259_lock, flags); + atomic_spin_unlock_irqrestore(&i8259_lock, flags); /* create a legacy host */ i8259_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY, diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 69e2630..7c84d27 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -32,7 +32,7 @@ static struct ipic * primary_ipic; static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip; -static DEFINE_SPINLOCK(ipic_lock); +static DEFINE_ATOMIC_SPINLOCK(ipic_lock); static 
struct ipic_info ipic_info[] = { [1] = { @@ -530,13 +530,13 @@ static void ipic_unmask_irq(unsigned int virq) unsigned long flags; u32 temp; - spin_lock_irqsave(&ipic_lock, flags); + atomic_spin_lock_irqsave(&ipic_lock, flags); temp = ipic_read(ipic->regs, ipic_info[src].mask); temp |= (1 << (31 - ipic_info[src].bit)); ipic_write(ipic->regs, ipic_info[src].mask, temp); - spin_unlock_irqrestore(&ipic_lock, flags); + atomic_spin_unlock_irqrestore(&ipic_lock, flags); } static void ipic_mask_irq(unsigned int virq) @@ -546,7 +546,7 @@ static void ipic_mask_irq(unsigned int virq) unsigned long flags; u32 temp; - spin_lock_irqsave(&ipic_lock, flags); + atomic_spin_lock_irqsave(&ipic_lock, flags); temp = ipic_read(ipic->regs, ipic_info[src].mask); temp &= ~(1 << (31 - ipic_info[src].bit)); @@ -556,7 +556,7 @@ static void ipic_mask_irq(unsigned int virq) * for nearly all cases. */ mb(); - spin_unlock_irqrestore(&ipic_lock, flags); + atomic_spin_unlock_irqrestore(&ipic_lock, flags); } static void ipic_ack_irq(unsigned int virq) @@ -566,7 +566,7 @@ static void ipic_ack_irq(unsigned int virq) unsigned long flags; u32 temp; - spin_lock_irqsave(&ipic_lock, flags); + atomic_spin_lock_irqsave(&ipic_lock, flags); temp = 1 << (31 - ipic_info[src].bit); ipic_write(ipic->regs, ipic_info[src].ack, temp); @@ -575,7 +575,7 @@ static void ipic_ack_irq(unsigned int virq) * for nearly all cases. */ mb(); - spin_unlock_irqrestore(&ipic_lock, flags); + atomic_spin_unlock_irqrestore(&ipic_lock, flags); } static void ipic_mask_irq_and_ack(unsigned int virq) @@ -585,7 +585,7 @@ static void ipic_mask_irq_and_ack(unsigned int virq) unsigned long flags; u32 temp; - spin_lock_irqsave(&ipic_lock, flags); + atomic_spin_lock_irqsave(&ipic_lock, flags); temp = ipic_read(ipic->regs, ipic_info[src].mask); temp &= ~(1 << (31 - ipic_info[src].bit)); @@ -598,7 +598,7 @@ static void ipic_mask_irq_and_ack(unsigned int virq) * for nearly all cases. 
*/ mb(); - spin_unlock_irqrestore(&ipic_lock, flags); + atomic_spin_unlock_irqrestore(&ipic_lock, flags); } static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type) diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 3981ae4..b0f66f9 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -46,7 +46,7 @@ static struct mpic *mpics; static struct mpic *mpic_primary; -static DEFINE_SPINLOCK(mpic_lock); +static DEFINE_ATOMIC_SPINLOCK(mpic_lock); #ifdef CONFIG_PPC32 /* XXX for now */ #ifdef CONFIG_IRQ_ALL_CPUS @@ -344,10 +344,10 @@ static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source) unsigned int mask = 1U << (fixup->index & 0x1f); writel(mask, fixup->applebase + soff); } else { - spin_lock(&mpic->fixup_lock); + atomic_spin_lock(&mpic->fixup_lock); writeb(0x11 + 2 * fixup->index, fixup->base + 2); writel(fixup->data, fixup->base + 4); - spin_unlock(&mpic->fixup_lock); + atomic_spin_unlock(&mpic->fixup_lock); } } @@ -363,7 +363,7 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, DBG("startup_ht_interrupt(0x%x, 0x%x) index: %d\n", source, irqflags, fixup->index); - spin_lock_irqsave(&mpic->fixup_lock, flags); + atomic_spin_lock_irqsave(&mpic->fixup_lock, flags); /* Enable and configure */ writeb(0x10 + 2 * fixup->index, fixup->base + 2); tmp = readl(fixup->base + 4); @@ -371,7 +371,7 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, if (irqflags & IRQ_LEVEL) tmp |= 0x22; writel(tmp, fixup->base + 4); - spin_unlock_irqrestore(&mpic->fixup_lock, flags); + atomic_spin_unlock_irqrestore(&mpic->fixup_lock, flags); #ifdef CONFIG_PM /* use the lowest bit inverted to the actual HW, @@ -393,12 +393,12 @@ static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source, DBG("shutdown_ht_interrupt(0x%x, 0x%x)\n", source, irqflags); /* Disable */ - spin_lock_irqsave(&mpic->fixup_lock, flags); + atomic_spin_lock_irqsave(&mpic->fixup_lock, flags); writeb(0x10 + 2 * fixup->index, fixup->base + 2); tmp = readl(fixup->base + 4); tmp |= 1; writel(tmp, fixup->base + 4); - spin_unlock_irqrestore(&mpic->fixup_lock, flags); + atomic_spin_unlock_irqrestore(&mpic->fixup_lock, flags); #ifdef CONFIG_PM /* use the lowest bit inverted to the actual HW, @@ -512,7 +512,7 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic) BUG_ON(mpic->fixups == NULL); /* Init spinlock */ - spin_lock_init(&mpic->fixup_lock); + atomic_spin_lock_init(&mpic->fixup_lock); /* Map U3 config space. We assume all IO-APICs are on the primary bus * so we only need to map 64kB. @@ -572,12 +572,12 @@ static int irq_choose_cpu(unsigned int virt_irq) cpumask_copy(&mask, irq_desc[virt_irq].affinity); if (cpus_equal(mask, CPU_MASK_ALL)) { static int irq_rover; - static DEFINE_SPINLOCK(irq_rover_lock); + static DEFINE_ATOMIC_SPINLOCK(irq_rover_lock); unsigned long flags; /* Round-robin distribution... 
*/ do_round_robin: - spin_lock_irqsave(&irq_rover_lock, flags); + atomic_spin_lock_irqsave(&irq_rover_lock, flags); while (!cpu_online(irq_rover)) { if (++irq_rover >= NR_CPUS) @@ -589,7 +589,7 @@ static int irq_choose_cpu(unsigned int virt_irq) irq_rover = 0; } while (!cpu_online(irq_rover)); - spin_unlock_irqrestore(&irq_rover_lock, flags); + atomic_spin_unlock_irqrestore(&irq_rover_lock, flags); } else { cpumask_t tmp; @@ -1372,14 +1372,14 @@ void __init mpic_set_serial_int(struct mpic *mpic, int enable) unsigned long flags; u32 v; - spin_lock_irqsave(&mpic_lock, flags); + atomic_spin_lock_irqsave(&mpic_lock, flags); v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1); if (enable) v |= MPIC_GREG_GLOBAL_CONF_1_SIE; else v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE; mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v); - spin_unlock_irqrestore(&mpic_lock, flags); + atomic_spin_unlock_irqrestore(&mpic_lock, flags); } void mpic_irq_set_priority(unsigned int irq, unsigned int pri) @@ -1392,7 +1392,7 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri) if (!mpic) return; - spin_lock_irqsave(&mpic_lock, flags); + atomic_spin_lock_irqsave(&mpic_lock, flags); if (mpic_is_ipi(mpic, irq)) { reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) & ~MPIC_VECPRI_PRIORITY_MASK; @@ -1404,7 +1404,7 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri) mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); } - spin_unlock_irqrestore(&mpic_lock, flags); + atomic_spin_unlock_irqrestore(&mpic_lock, flags); } void mpic_setup_this_cpu(void) @@ -1419,7 +1419,7 @@ void mpic_setup_this_cpu(void) DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); - spin_lock_irqsave(&mpic_lock, flags); + atomic_spin_lock_irqsave(&mpic_lock, flags); /* let the mpic know we want intrs. default affinity is 0xffffffff * until changed via /proc. That's how it's done on x86. If we want @@ -1435,7 +1435,7 @@ void mpic_setup_this_cpu(void) /* Set current processor priority to 0 */ mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0); - spin_unlock_irqrestore(&mpic_lock, flags); + atomic_spin_unlock_irqrestore(&mpic_lock, flags); #endif /* CONFIG_SMP */ } @@ -1464,7 +1464,7 @@ void mpic_teardown_this_cpu(int secondary) BUG_ON(mpic == NULL); DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); - spin_lock_irqsave(&mpic_lock, flags); + atomic_spin_lock_irqsave(&mpic_lock, flags); /* let the mpic know we don't want intrs. 
*/ for (i = 0; i < mpic->num_sources ; i++) @@ -1478,7 +1478,7 @@ void mpic_teardown_this_cpu(int secondary) */ mpic_eoi(mpic); - spin_unlock_irqrestore(&mpic_lock, flags); + atomic_spin_unlock_irqrestore(&mpic_lock, flags); } diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 466ce9a..2492a4a 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c @@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) int src; int subvirq; - spin_lock(&desc->lock); + atomic_spin_lock(&desc->lock); if (desc->status & IRQ_LEVEL) desc->chip->mask(virq); else desc->chip->mask_ack(virq); - spin_unlock(&desc->lock); + atomic_spin_unlock(&desc->lock); msr = mfdcr(uic->dcrbase + UIC_MSR); if (!msr) /* spurious interrupt */ @@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) generic_handle_irq(subvirq); uic_irq_ret: - spin_lock(&desc->lock); + atomic_spin_lock(&desc->lock); if (desc->status & IRQ_LEVEL) desc->chip->ack(virq); if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) desc->chip->unmask(virq); - spin_unlock(&desc->lock); + atomic_spin_unlock(&desc->lock); } static struct uic * __init uic_init_one(struct device_node *node) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index e1f33a8..db7cf0d 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -348,6 +348,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) unsigned long timeout; #endif + preempt_disable(); local_irq_save(flags); bp = in_breakpoint_table(regs->nip, &offset); @@ -524,6 +525,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) insert_cpu_bpts(); local_irq_restore(flags); + preempt_enable(); return cmd != 'X' && cmd != EOF; } diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h index 9d2a179..e70d6dd 100644 --- a/arch/s390/include/asm/rwsem.h +++ b/arch/s390/include/asm/rwsem.h @@ -48,16 +48,21 @@ struct rwsem_waiter; -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *); +extern struct rw_anon_semaphore * +rwsem_down_read_failed(struct rw_anon_semaphore *); +extern struct rw_anon_semaphore * +rwsem_down_write_failed(struct rw_anon_semaphore *); +extern struct rw_anon_semaphore * +rwsem_wake(struct rw_anon_semaphore *); +extern struct rw_anon_semaphore * +rwsem_downgrade_wake(struct rw_anon_semaphore *); +extern struct rw_anon_semaphore * +rwsem_downgrade_write(struct rw_anon_semaphore *); /* * the semaphore definition */ -struct rw_semaphore { +struct rw_anon_semaphore { signed long count; spinlock_t wait_lock; struct list_head wait_list; @@ -85,40 +90,40 @@ struct rw_semaphore { */ #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +# define __RWSEM_ANON_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } #else -# define __RWSEM_DEP_MAP_INIT(lockname) +# define __RWSEM_ANON_DEP_MAP_INIT(lockname) #endif -#define __RWSEM_INITIALIZER(name) \ +#define __RWSEM_ANON_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \ - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } + LIST_HEAD_INIT((name).wait_list) __RWSEM_ANON_DEP_MAP_INIT(name) } -#define 
DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) +#define DECLARE_ANON_RWSEM(name) \ + struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name) -static inline void init_rwsem(struct rw_semaphore *sem) +static inline void init_anon_rwsem(struct rw_anon_semaphore *sem) { sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); } -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key); +extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name, + struct lock_class_key *key); -#define init_rwsem(sem) \ +#define init_anon_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ - __init_rwsem((sem), #sem, &__key); \ + __init_anon_rwsem((sem), #sem, &__key); \ } while (0) /* * lock for reading */ -static inline void __down_read(struct rw_semaphore *sem) +static inline void __down_read(struct rw_anon_semaphore *sem) { signed long old, new; @@ -146,7 +151,7 @@ static inline void __down_read(struct rw_semaphore *sem) /* * trylock for reading -- returns 1 if successful, 0 if contention */ -static inline int __down_read_trylock(struct rw_semaphore *sem) +static inline int __down_read_trylock(struct rw_anon_semaphore *sem) { signed long old, new; @@ -177,7 +182,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) /* * lock for writing */ -static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) +static inline void +__down_write_nested(struct rw_anon_semaphore *sem, int subclass) { signed long old, new, tmp; @@ -203,7 +209,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) rwsem_down_write_failed(sem); } -static inline void __down_write(struct rw_semaphore *sem) +static inline void __down_write(struct rw_anon_semaphore *sem) { __down_write_nested(sem, 0); } @@ -211,7 +217,7 @@ static inline void __down_write(struct rw_semaphore *sem) /* * trylock for writing -- returns 1 if successful, 0 if contention */ -static inline int __down_write_trylock(struct rw_semaphore *sem) +static inline int __down_write_trylock(struct rw_anon_semaphore *sem) { signed long old; @@ -239,7 +245,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) /* * unlock after reading */ -static inline void __up_read(struct rw_semaphore *sem) +static inline void __up_read(struct rw_anon_semaphore *sem) { signed long old, new; @@ -269,7 +275,7 @@ static inline void __up_read(struct rw_semaphore *sem) /* * unlock after writing */ -static inline void __up_write(struct rw_semaphore *sem) +static inline void __up_write(struct rw_anon_semaphore *sem) { signed long old, new, tmp; @@ -299,7 +305,7 @@ static inline void __up_write(struct rw_semaphore *sem) /* * downgrade write lock to read lock */ -static inline void __downgrade_write(struct rw_semaphore *sem) +static inline void __downgrade_write(struct rw_anon_semaphore *sem) { signed long old, new, tmp; @@ -328,7 +334,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) /* * implement atomic add functionality */ -static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) +static inline void rwsem_atomic_add(long delta, struct rw_anon_semaphore *sem) { signed long old, new; @@ -354,7 +360,8 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) /* * implement exchange and add functionality */ -static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) +static inline long +rwsem_atomic_update(long 
delta, struct rw_anon_semaphore *sem) { signed long old, new; @@ -378,10 +385,52 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) return new; } -static inline int rwsem_is_locked(struct rw_semaphore *sem) +static inline int rwsem_is_locked(struct rw_anon_semaphore *sem) { return (sem->count != 0); } +struct rw_semaphore { + signed long count; + spinlock_t wait_lock; + struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + +#define __RWSEM_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \ + LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } + +#define DECLARE_RWSEM(name) \ + struct rw_anon_semaphore name = __RWSEM_INITIALIZER(name) + +static inline void init_rwsem(struct rw_anon_semaphore *sem) +{ + sem->count = RWSEM_UNLOCKED_VALUE; + spin_lock_init(&sem->wait_lock); + INIT_LIST_HEAD(&sem->wait_list); +} + +static inline void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) +{ + __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key); +} + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + #endif /* __KERNEL__ */ #endif /* _S390_RWSEM_H */ diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 3d8a96d..81150b0 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -96,7 +96,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) * pte_free_tlb frees a pte table and clears the CRSTE for the * page table from the tlb. */ -static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte) +static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, + unsigned long address) { if (!tlb->fullmm) { tlb->array[tlb->nr_ptes++] = pte; @@ -113,7 +114,8 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte) * as the pgd. pmd_free_tlb checks the asce_limit against 2GB * to avoid the double free of the pmd in this case. */ -static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) +static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, + unsigned long address) { #ifdef __s390x__ if (tlb->mm->context.asce_limit <= (1UL << 31)) @@ -134,7 +136,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) * as the pgd. pud_free_tlb checks the asce_limit against 4TB * to avoid the double free of the pud in this case. 
*/ -static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) +static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, + unsigned long address) { #ifdef __s390x__ if (tlb->mm->context.asce_limit <= (1UL << 42)) diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index f9b1440..8d15314 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -210,7 +210,7 @@ static noinline __init void detect_machine_type(void) machine_flags |= MACHINE_FLAG_VM; } -static void early_pgm_check_handler(void) +static __init void early_pgm_check_handler(void) { unsigned long addr; const struct exception_table_entry *fixup; @@ -222,7 +222,7 @@ static void early_pgm_check_handler(void) S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; } -void setup_lowcore_early(void) +static noinline __init void setup_lowcore_early(void) { psw_t psw; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 2270730..be2cae0 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -687,13 +687,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus) #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) lowcore->extended_save_area_addr = (u32) save_area; -#else - if (vdso_alloc_per_cpu(smp_processor_id(), lowcore)) - BUG(); #endif set_prefix((u32)(unsigned long) lowcore); local_mcck_enable(); local_irq_enable(); +#ifdef CONFIG_64BIT + if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) + BUG(); +#endif for_each_possible_cpu(cpu) if (cpu != smp_processor_id()) smp_create_idle(cpu); diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index d4c8e9c..5582ff1 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -272,14 +272,14 @@ void __init time_init(void) * small for /proc/uptime to be accurate. * Reset xtime and wall_to_monotonic to sane values. */ - write_seqlock_irqsave(&xtime_lock, flags); + write_atomic_seqlock_irqsave(&xtime_lock, flags); now = get_clock(); tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime); clocksource_tod.cycle_last = now; clocksource_tod.raw_time = xtime; tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts); set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec); - write_sequnlock_irqrestore(&xtime_lock, flags); + write_atomic_sequnlock_irqrestore(&xtime_lock, flags); /* Enable TOD clock interrupts on the boot cpu. */ init_cpu_timer(); diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index 79dbfee..49106c6 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S @@ -88,10 +88,17 @@ __kernel_clock_gettime: llilh %r4,0x0100 sar %a4,%r4 lghi %r4,0 + epsw %r5,0 sacf 512 /* Magic ectg instruction */ .insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4 - sacf 0 - sar %a4,%r2 + tml %r5,0x4000 + jo 11f + tml %r5,0x8000 + jno 10f + sacf 256 + j 11f +10: sacf 0 +11: sar %a4,%r2 algr %r1,%r0 /* r1 = cputime as TOD value */ mghi %r1,1000 /* convert to nanoseconds */ srlg %r1,%r1,12 /* r1 = cputime in nanosec */ diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c index e6a4fe9..bd1f5c6 100644 --- a/arch/s390/power/swsusp.c +++ b/arch/s390/power/swsusp.c @@ -7,24 +7,36 @@ * */ +#include -/* - * save CPU registers before creating a hibernation image and before - * restoring the memory state from it - */ void save_processor_state(void) { - /* implentation contained in the - * swsusp_arch_suspend function + /* swsusp_arch_suspend() actually saves all cpu register contents. 
+ * Machine checks must be disabled since swsusp_arch_suspend() stores + * register contents to their lowcore save areas. That's the same + * place where register contents on machine checks would be saved. + * To avoid register corruption disable machine checks. + * We must also disable machine checks in the new psw mask for + * program checks, since swsusp_arch_suspend() may generate program + * checks. Disabling machine checks for all other new psw masks is + * just paranoia. */ + local_mcck_disable(); + /* Disable lowcore protection */ + __ctl_clear_bit(0,28); + S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK; + S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK; + S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK; + S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK; } -/* - * restore the contents of CPU registers - */ void restore_processor_state(void) { - /* implentation contained in the - * swsusp_arch_resume function - */ + S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK; + S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK; + S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK; + S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK; + /* Enable lowcore protection */ + __ctl_set_bit(0,28); + local_mcck_enable(); } diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S index 76d688d..b26df5c 100644 --- a/arch/s390/power/swsusp_asm64.S +++ b/arch/s390/power/swsusp_asm64.S @@ -32,19 +32,14 @@ swsusp_arch_suspend: /* Deactivate DAT */ stnsm __SF_EMPTY(%r15),0xfb - /* Switch off lowcore protection */ - stctg %c0,%c0,__SF_EMPTY(%r15) - ni __SF_EMPTY+4(%r15),0xef - lctlg %c0,%c0,__SF_EMPTY(%r15) - /* Store prefix register on stack */ stpx __SF_EMPTY(%r15) - /* Setup base register for lowcore (absolute 0) */ - llgf %r1,__SF_EMPTY(%r15) + /* Save prefix register contents for lowcore */ + llgf %r4,__SF_EMPTY(%r15) /* Get pointer to save area */ - aghi %r1,0x1000 + lghi %r1,0x1000 /* Store registers */ mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ @@ -79,17 +74,15 @@ swsusp_arch_suspend: xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) spx __SF_EMPTY(%r15) - /* Setup lowcore */ - brasl %r14,setup_lowcore_early + lghi %r2,0 + lghi %r3,2*PAGE_SIZE + lghi %r5,2*PAGE_SIZE +1: mvcle %r2,%r4,0 + jo 1b /* Save image */ brasl %r14,swsusp_save - /* Switch on lowcore protection */ - stctg %c0,%c0,__SF_EMPTY(%r15) - oi __SF_EMPTY+4(%r15),0x10 - lctlg %c0,%c0,__SF_EMPTY(%r15) - /* Restore prefix register and return */ lghi %r1,0x1000 spx 0x318(%r1) @@ -117,11 +110,6 @@ swsusp_arch_resume: /* Deactivate DAT */ stnsm __SF_EMPTY(%r15),0xfb - /* Switch off lowcore protection */ - stctg %c0,%c0,__SF_EMPTY(%r15) - ni __SF_EMPTY+4(%r15),0xef - lctlg %c0,%c0,__SF_EMPTY(%r15) - /* Set prefix page to zero */ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) spx __SF_EMPTY(%r15) @@ -175,7 +163,7 @@ swsusp_arch_resume: /* Load old stack */ lg %r15,0x2f8(%r13) - /* Pointer to save arae */ + /* Pointer to save area */ lghi %r13,0x1000 #ifdef CONFIG_SMP @@ -187,11 +175,6 @@ swsusp_arch_resume: /* Restore prefix register */ spx 0x318(%r13) - /* Switch on lowcore protection */ - stctg %c0,%c0,__SF_EMPTY(%r15) - oi __SF_EMPTY+4(%r15),0x10 - lctlg %c0,%c0,__SF_EMPTY(%r15) - /* Activate DAT */ stosm __SF_EMPTY(%r15),0x04 diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 84dd2db..63ca37b 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -73,20 +73,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) 
quicklist_free_page(QUICK_PT, NULL, pte); } -#define __pte_free_tlb(tlb,pte) \ +#define __pte_free_tlb(tlb,pte,addr) \ do { \ pgtable_page_dtor(pte); \ tlb_remove_page((tlb), (pte)); \ } while (0) -/* - * allocating and freeing a pmd is trivial: the 1-entry pmd is - * inside the pgd, so has no extra memory associated with it. - */ - -#define pmd_free(mm, x) do { } while (0) -#define __pmd_free_tlb(tlb,x) do { } while (0) - static inline void check_pgt_cache(void) { quicklist_trim(QUICK_PGD, NULL, 25, 16); diff --git a/arch/sh/include/asm/rwsem.h b/arch/sh/include/asm/rwsem.h index 1987f3e..ed8c771 100644 --- a/arch/sh/include/asm/rwsem.h +++ b/arch/sh/include/asm/rwsem.h @@ -19,7 +19,7 @@ /* * the semaphore definition */ -struct rw_semaphore { +struct rw_anon_semaphore { long count; #define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_ACTIVE_BIAS 0x00000001 @@ -35,35 +35,38 @@ struct rw_semaphore { }; #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +# define __RWSEM_ANON_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } #else -# define __RWSEM_DEP_MAP_INIT(lockname) +# define __RWSEM_ANON_DEP_MAP_INIT(lockname) #endif -#define __RWSEM_INITIALIZER(name) \ +#define __RWSEM_ANON_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEP_MAP_INIT(name) } + __RWSEM_ANON_DEP_MAP_INIT(name) } -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) +#define DECLARE_ANON_RWSEM(name) \ + struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name) -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_down_read_failed(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_down_write_failed(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_downgrade_wake(struct rw_anon_semaphore *sem); -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key); +extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name, + struct lock_class_key *key); -#define init_rwsem(sem) \ +#define init_anon_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ - __init_rwsem((sem), #sem, &__key); \ + __init_anon_rwsem((sem), #sem, &__key); \ } while (0) -static inline void init_rwsem(struct rw_semaphore *sem) +static inline void init_anon_rwsem(struct rw_anon_semaphore *sem) { sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); @@ -73,7 +76,7 @@ static inline void init_rwsem(struct rw_semaphore *sem) /* * lock for reading */ -static inline void __down_read(struct rw_semaphore *sem) +static inline void __down_read(struct rw_anon_semaphore *sem) { if (atomic_inc_return((atomic_t *)(&sem->count)) > 0) smp_wmb(); @@ -81,7 +84,7 @@ static inline void __down_read(struct rw_semaphore *sem) rwsem_down_read_failed(sem); } -static inline int __down_read_trylock(struct rw_semaphore *sem) +static inline int __down_read_trylock(struct rw_anon_semaphore *sem) { int tmp; @@ -98,7 +101,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) /* * lock for writing */ -static inline void 
__down_write(struct rw_semaphore *sem) +static inline void __down_write(struct rw_anon_semaphore *sem) { int tmp; @@ -110,7 +113,7 @@ static inline void __down_write(struct rw_semaphore *sem) rwsem_down_write_failed(sem); } -static inline int __down_write_trylock(struct rw_semaphore *sem) +static inline int __down_write_trylock(struct rw_anon_semaphore *sem) { int tmp; @@ -123,7 +126,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) /* * unlock after reading */ -static inline void __up_read(struct rw_semaphore *sem) +static inline void __up_read(struct rw_anon_semaphore *sem) { int tmp; @@ -136,7 +139,7 @@ static inline void __up_read(struct rw_semaphore *sem) /* * unlock after writing */ -static inline void __up_write(struct rw_semaphore *sem) +static inline void __up_write(struct rw_anon_semaphore *sem) { smp_wmb(); if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, @@ -147,7 +150,7 @@ static inline void __up_write(struct rw_semaphore *sem) /* * implement atomic add functionality */ -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +static inline void rwsem_atomic_add(int delta, struct rw_anon_semaphore *sem) { atomic_add(delta, (atomic_t *)(&sem->count)); } @@ -155,7 +158,7 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) /* * downgrade write lock to read lock */ -static inline void __downgrade_write(struct rw_semaphore *sem) +static inline void __downgrade_write(struct rw_anon_semaphore *sem) { int tmp; @@ -165,7 +168,8 @@ static inline void __downgrade_write(struct rw_semaphore *sem) rwsem_downgrade_wake(sem); } -static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) +static inline void + __down_write_nested(struct rw_anon_semaphore *sem, int subclass) { __down_write(sem); } @@ -173,12 +177,60 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) /* * implement exchange and add functionality */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +static inline int rwsem_atomic_update(int delta, struct rw_anon_semaphore *sem) { smp_mb(); return atomic_add_return(delta, (atomic_t *)(&sem->count)); } +static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem) +{ + return (sem->count != 0); +} + +struct rw_semaphore { + long count; + spinlock_t wait_lock; + struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + +#define __RWSEM_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ + LIST_HEAD_INIT((name).wait_list) \ + __RWSEM_DEP_MAP_INIT(name) } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +static inline void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) +{ + __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key); +} + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + +static inline void init_rwsem(struct rw_semaphore *sem) +{ + sem->count = RWSEM_UNLOCKED_VALUE; + spin_lock_init(&sem->wait_lock); + INIT_LIST_HEAD(&sem->wait_list); +} + static inline int rwsem_is_locked(struct rw_semaphore *sem) { return (sem->count != 0); diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index 9c16f73..da8fe7a 100644 --- 
a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -91,9 +91,9 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) } #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) -#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep) -#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp) -#define pud_free_tlb(tlb, pudp) pud_free((tlb)->mm, pudp) +#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) +#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) +#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) #define tlb_migrate_finish(mm) do { } while (0) diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 3d09062..b17f303 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -1,4 +1,4 @@ -/* +/* * linux/arch/sh/kernel/irq.c * * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar @@ -67,7 +67,7 @@ int show_interrupts(struct seq_file *p, void *v) if (!desc) return 0; - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; @@ -88,7 +88,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); out: - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); return 0; } #endif diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h index 681582d..ca2b344 100644 --- a/arch/sparc/include/asm/pgalloc_32.h +++ b/arch/sparc/include/asm/pgalloc_32.h @@ -44,8 +44,8 @@ BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long) BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *) #define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd) -#define pmd_free(mm, pmd) free_pmd_fast(pmd) -#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) +#define pmd_free(mm, pmd) free_pmd_fast(pmd) +#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *) #define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE) @@ -62,7 +62,7 @@ BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *) #define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte) BTFIXUPDEF_CALL(void, pte_free, pgtable_t ) -#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte) -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) +#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte) +#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) #endif /* _SPARC_PGALLOC_H */ diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h index 1dc129a..0825088 100644 --- a/arch/sparc/include/asm/rwsem.h +++ b/arch/sparc/include/asm/rwsem.h @@ -19,7 +19,7 @@ struct rwsem_waiter; -struct rw_semaphore { +struct rw_anon_semaphore { signed int count; spinlock_t wait_lock; struct list_head wait_list; @@ -29,51 +29,92 @@ struct rw_semaphore { }; #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +# define __RWSEM_ANON_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } #else -# define __RWSEM_DEP_MAP_INIT(lockname) +# define __RWSEM_ANON_DEP_MAP_INIT(lockname) #endif -#define __RWSEM_INITIALIZER(name) \ +#define __RWSEM_ANON_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEP_MAP_INIT(name) } + __RWSEM_ANON_DEP_MAP_INIT(name) } -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) +#define DECLARE_ANON_RWSEM(name) \ + 
struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name) -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key); +extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name, + struct lock_class_key *key); -#define init_rwsem(sem) \ +#define init_anon_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ - __init_rwsem((sem), #sem, &__key); \ + __init_anon_rwsem((sem), #sem, &__key); \ } while (0) -extern void __down_read(struct rw_semaphore *sem); -extern int __down_read_trylock(struct rw_semaphore *sem); -extern void __down_write(struct rw_semaphore *sem); -extern int __down_write_trylock(struct rw_semaphore *sem); -extern void __up_read(struct rw_semaphore *sem); -extern void __up_write(struct rw_semaphore *sem); -extern void __downgrade_write(struct rw_semaphore *sem); +extern void __down_read(struct rw_anon_semaphore *sem); +extern int __down_read_trylock(struct rw_anon_semaphore *sem); +extern void __down_write(struct rw_anon_semaphore *sem); +extern int __down_write_trylock(struct rw_anon_semaphore *sem); +extern void __up_read(struct rw_anon_semaphore *sem); +extern void __up_write(struct rw_anon_semaphore *sem); +extern void __downgrade_write(struct rw_anon_semaphore *sem); -static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) +static inline void +__down_write_nested(struct rw_anon_semaphore *sem, int subclass) { __down_write(sem); } -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +static inline int rwsem_atomic_update(int delta, struct rw_anon_semaphore *sem) { return atomic_add_return(delta, (atomic_t *)(&sem->count)); } -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +static inline void rwsem_atomic_add(int delta, struct rw_anon_semaphore *sem) { atomic_add(delta, (atomic_t *)(&sem->count)); } +static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem) +{ + return (sem->count != 0); +} + +struct rw_semaphore { + signed int count; + spinlock_t wait_lock; + struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + +#define __RWSEM_INITIALIZER(name) \ +{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ + __RWSEM_DEP_MAP_INIT(name) } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +static inline void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) +{ + __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key); +} + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + static inline int rwsem_is_locked(struct rw_semaphore *sem) { return (sem->count != 0); diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h index ee38e73..dca406b 100644 --- a/arch/sparc/include/asm/tlb_64.h +++ b/arch/sparc/include/asm/tlb_64.h @@ -100,9 +100,9 @@ static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page) } #define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0) -#define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage) -#define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp) -#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp) +#define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm,
ptepage) +#define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp) +#define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr) #define tlb_migrate_finish(mm) do { } while (0) #define tlb_start_vma(tlb, vma) do { } while (0) diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index f0ee790..0cb8a19 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) @@ -785,14 +785,14 @@ void fixup_irqs(void) for (irq = 0; irq < NR_IRQS; irq++) { unsigned long flags; - spin_lock_irqsave(&irq_desc[irq].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[irq].lock, flags); if (irq_desc[irq].action && !(irq_desc[irq].status & IRQ_PER_CPU)) { if (irq_desc[irq].chip->set_affinity) irq_desc[irq].chip->set_affinity(irq, irq_desc[irq].affinity); } - spin_unlock_irqrestore(&irq_desc[irq].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } tick_ops->disable_irq(); diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index 85e7037..bea30b2 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c @@ -703,10 +703,10 @@ static void pcic_clear_clock_irq(void) static irqreturn_t pcic_timer_handler (int irq, void *h) { - write_seqlock(&xtime_lock); /* Dummy, to show that we remember */ + write_atomic_seqlock(&xtime_lock); /* Dummy, to show that we remember */ pcic_clear_clock_irq(); do_timer(1); - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); #ifndef CONFIG_SMP update_process_times(user_mode(get_irq_regs())); #endif @@ -766,7 +766,7 @@ static void pci_do_gettimeofday(struct timeval *tv) unsigned long max_ntp_tick = tick_usec - tickadj; do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); + seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags); usec = do_gettimeoffset(); /* @@ -779,7 +779,7 @@ static void pci_do_gettimeofday(struct timeval *tv) sec = xtime.tv_sec; usec += (xtime.tv_nsec / 1000); - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); + } while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags)); while (usec >= 1000000) { usec -= 1000000; diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 614ac7b..6530942 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c @@ -93,7 +93,7 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id) #endif /* Protect counter clear so that do_gettimeoffset works */ - write_seqlock(&xtime_lock); + write_atomic_seqlock(&xtime_lock); clear_clock_irq(); @@ -109,7 +109,7 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id) else last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */ } - write_sequnlock(&xtime_lock); + write_atomic_sequnlock(&xtime_lock); #ifndef CONFIG_SMP update_process_times(user_mode(get_irq_regs())); @@ -251,7 +251,7 @@ void do_gettimeofday(struct timeval *tv) unsigned long max_ntp_tick = tick_usec - tickadj; do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); + seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags); usec = 
do_gettimeoffset(); /* @@ -264,7 +264,7 @@ void do_gettimeofday(struct timeval *tv) sec = xtime.tv_sec; usec += (xtime.tv_nsec / 1000); - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); + } while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags)); while (usec >= 1000000) { usec -= 1000000; @@ -281,9 +281,9 @@ int do_settimeofday(struct timespec *tv) { int ret; - write_seqlock_irq(&xtime_lock); + write_atomic_seqlock_irq(&xtime_lock); ret = bus_do_settimeofday(tv); - write_sequnlock_irq(&xtime_lock); + write_atomic_sequnlock_irq(&xtime_lock); clock_was_set(); return ret; } diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c index 7916feb..a9f414c 100644 --- a/arch/sparc/mm/highmem.c +++ b/arch/sparc/mm/highmem.c @@ -34,7 +34,7 @@ void *kmap_atomic(struct page *page, enum km_type type) unsigned long idx; unsigned long vaddr; - /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ + preempt_disable(); pagefault_disable(); if (!PageHighMem(page)) return page_address(page); @@ -73,6 +73,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type) if (vaddr < FIXADDR_START) { // FIXME pagefault_enable(); + preempt_enable(); return; } @@ -99,6 +100,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type) #endif pagefault_enable(); + preempt_enable(); } EXPORT_SYMBOL(kunmap_atomic); diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h index 7189843..32c8ce4 100644 --- a/arch/um/include/asm/pgalloc.h +++ b/arch/um/include/asm/pgalloc.h @@ -40,7 +40,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) __free_page(pte); } -#define __pte_free_tlb(tlb,pte) \ +#define __pte_free_tlb(tlb,pte, address) \ do { \ pgtable_page_dtor(pte); \ tlb_remove_page((tlb),(pte)); \ @@ -53,7 +53,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) free_page((unsigned long)pmd); } -#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) +#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x)) #endif #define check_pgt_cache() do { } while (0) diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index 5240fa1..660caed 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h @@ -116,11 +116,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) -#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep) +#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) -#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp) +#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr) -#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp) +#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) #define tlb_migrate_finish(mm) do {} while (0) diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 454cdb4..048ca4a 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -33,7 +33,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) seq_putc(p, '\n'); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 738bdc6..6136f26 100644 --- 
a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -122,10 +122,18 @@ config ARCH_MAY_HAVE_PC_FDC def_bool y config RWSEM_GENERIC_SPINLOCK - def_bool !X86_XADD + bool + depends on !X86_XADD || PREEMPT_RT + default y + +config ASM_SEMAPHORES + bool + default y config RWSEM_XCHGADD_ALGORITHM - def_bool X86_XADD + bool + depends on X86_XADD && !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT + default y config ARCH_HAS_CPU_IDLE_WAIT def_bool y @@ -671,7 +679,7 @@ config IOMMU_API config MAXSMP bool "Configure Maximum number of SMP Processors and NUMA Nodes" - depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL + depends on 0 && X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL select CPUMASK_OFFSTACK default n ---help--- diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index d105f29..3347029 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -76,6 +76,7 @@ config DEBUG_PER_CPU_MAPS bool "Debug access to per_cpu maps" depends on DEBUG_KERNEL depends on SMP + depends on !PREEMPT_RT default n ---help--- Say Y to verify that the per_cpu map being accessed has @@ -126,6 +127,7 @@ config DEBUG_NX_TEST config 4KSTACKS bool "Use 4Kb for kernel stacks instead of 8Kb" depends on X86_32 + default y ---help--- If you say Y here the kernel will use a 4Kb stacksize for the kernel stack attached to each process/thread. This facilitates diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 20d1465..b87dff9 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -50,8 +50,8 @@ #define ACPI_ASM_MACROS #define BREAKPOINT3 -#define ACPI_DISABLE_IRQS() local_irq_disable() -#define ACPI_ENABLE_IRQS() local_irq_enable() +#define ACPI_DISABLE_IRQS() local_irq_disable_nort() +#define ACPI_ENABLE_IRQS() local_irq_enable_nort() #define ACPI_FLUSH_CPU_CACHE() wbinvd() int __acpi_acquire_global_lock(unsigned int *lock); diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h index dc5a667..166704f 100644 --- a/arch/x86/include/asm/atomic_32.h +++ b/arch/x86/include/asm/atomic_32.h @@ -186,10 +186,10 @@ static inline int atomic_add_return(int i, atomic_t *v) #ifdef CONFIG_M386 no_xadd: /* Legacy 386 processor */ - local_irq_save(flags); + raw_local_irq_save(flags); __i = atomic_read(v); atomic_set(v, i + __i); - local_irq_restore(flags); + raw_local_irq_restore(flags); return i + __i; #endif } diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h index 014c2b8..3a53e85 100644 --- a/arch/x86/include/asm/highmem.h +++ b/arch/x86/include/asm/highmem.h @@ -58,6 +58,17 @@ extern void *kmap_high(struct page *page); extern void kunmap_high(struct page *page); void *kmap(struct page *page); +extern void kunmap_virt(void *ptr); +extern struct page *kmap_to_page(void *ptr); +void kunmap(struct page *page); + +void *__kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); +void *__kmap_atomic(struct page *page, enum km_type type); +void *__kmap_atomic_direct(struct page *page, enum km_type type); +void __kunmap_atomic(void *kvaddr, enum km_type type); +void *__kmap_atomic_pfn(unsigned long pfn, enum km_type type); +struct page *__kmap_atomic_to_page(void *ptr); + void kunmap(struct page *page); void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); void *kmap_atomic(struct page *page, enum km_type type); @@ -67,7 +78,8 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); struct page *kmap_atomic_to_page(void *ptr); #ifndef CONFIG_PARAVIRT -#define 
kmap_atomic_pte(page, type) kmap_atomic(page, type) +#define kmap_atomic_pte(page, type) kmap_atomic(page, type) +#define kmap_atomic_pte_direct(page, type) kmap_atomic_direct(page, type) #endif #define flush_cache_kmaps() do { } while (0) @@ -75,6 +87,27 @@ struct page *kmap_atomic_to_page(void *ptr); extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn, unsigned long end_pfn); +/* + * on PREEMPT_RT kmap_atomic() is a wrapper that uses kmap(): + */ +#ifdef CONFIG_PREEMPT_RT +# define kmap_atomic_prot(page, type, prot) ({ pagefault_disable(); kmap(page); }) +# define kmap_atomic(page, type) ({ pagefault_disable(); kmap(page); }) +# define kmap_atomic_pfn(pfn, type) kmap(pfn_to_page(pfn)) +# define kunmap_atomic(kvaddr, type) do { pagefault_enable(); kunmap_virt(kvaddr); } while(0) +# define kmap_atomic_to_page(kvaddr) kmap_to_page(kvaddr) +# define kmap_atomic_direct(page, type) __kmap_atomic_direct(page, type) +# define kunmap_atomic_direct(kvaddr, type) __kunmap_atomic(kvaddr, type) +#else +# define kmap_atomic_prot(page, type, prot) __kmap_atomic_prot(page, type, prot) +# define kmap_atomic(page, type) __kmap_atomic(page, type) +# define kmap_atomic_pfn(pfn, type) __kmap_atomic_pfn(pfn, type) +# define kunmap_atomic(kvaddr, type) __kunmap_atomic(kvaddr, type) +# define kmap_atomic_to_page(kvaddr) __kmap_atomic_to_page(kvaddr) +# define kmap_atomic_direct(page, type) __kmap_atomic(page, type) +# define kunmap_atomic_direct(kvaddr, type) __kunmap_atomic(kvaddr, type) +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_X86_HIGHMEM_H */ diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h index 1edbf89..a2bd5b5 100644 --- a/arch/x86/include/asm/i8253.h +++ b/arch/x86/include/asm/i8253.h @@ -6,7 +6,7 @@ #define PIT_CH0 0x40 #define PIT_CH2 0x42 -extern spinlock_t i8253_lock; +extern atomic_spinlock_t i8253_lock; extern struct clock_event_device *global_clock_event; diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h index 58d7091..4720126 100644 --- a/arch/x86/include/asm/i8259.h +++ b/arch/x86/include/asm/i8259.h @@ -24,7 +24,7 @@ extern unsigned int cached_irq_mask; #define SLAVE_ICW4_DEFAULT 0x01 #define PIC_ICW4_AEOI 2 -extern spinlock_t i8259A_lock; +extern atomic_spinlock_t i8259A_lock; extern void init_8259A(int auto_eoi); extern void enable_8259A_irq(unsigned int irq); diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 7639dbf..0ec050a 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -14,12 +14,21 @@ #define IRQ_STACK_ORDER 2 #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) -#define STACKFAULT_STACK 1 -#define DOUBLEFAULT_STACK 2 -#define NMI_STACK 3 -#define DEBUG_STACK 4 -#define MCE_STACK 5 -#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ +#ifdef CONFIG_PREEMPT_RT +# define STACKFAULT_STACK 0 +# define DOUBLEFAULT_STACK 1 +# define NMI_STACK 2 +# define DEBUG_STACK 0 +# define MCE_STACK 3 +# define N_EXCEPTION_STACKS 3 /* hw limit: 7 */ +#else +# define STACKFAULT_STACK 1 +# define DOUBLEFAULT_STACK 2 +# define NMI_STACK 3 +# define DEBUG_STACK 4 +# define MCE_STACK 5 +# define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ +#endif #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 4fb37c8..513b09e 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -340,6 +340,7 @@ struct 
pv_mmu_ops { #ifdef CONFIG_HIGHPTE void *(*kmap_atomic_pte)(struct page *page, enum km_type type); + void *(*kmap_atomic_pte_direct)(struct page *page, enum km_type type); #endif struct pv_lazy_ops lazy_mode; @@ -1136,6 +1137,14 @@ static inline void *kmap_atomic_pte(struct page *page, enum km_type type) ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type); return (void *)ret; } + +static inline void *kmap_atomic_pte_direct(struct page *page, enum km_type type) +{ + unsigned long ret; + ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte_direct, + page, type); + return (void *)ret; +} #endif static inline void pte_update(struct mm_struct *mm, unsigned long addr, diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index b399988..a2baf26 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -83,7 +83,7 @@ struct irq_routing_table { extern unsigned int pcibios_irq_mask; extern int pcibios_scanned; -extern spinlock_t pci_config_lock; +extern atomic_spinlock_t pci_config_lock; extern int (*pcibios_enable_irq)(struct pci_dev *dev); extern void (*pcibios_disable_irq)(struct pci_dev *dev); diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index dd14c54..0e8c2a0 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -46,7 +46,13 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte) __free_page(pte); } -extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte); +extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte); + +static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, + unsigned long address) +{ + ___pte_free_tlb(tlb, pte); +} static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) @@ -78,7 +84,13 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) free_page((unsigned long)pmd); } -extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); +extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); + +static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, + unsigned long adddress) +{ + ___pmd_free_tlb(tlb, pmd); +} #ifdef CONFIG_X86_PAE extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); @@ -108,7 +120,14 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) free_page((unsigned long)pud); } -extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud); +extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud); + +static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, + unsigned long address) +{ + ___pud_free_tlb(tlb, pud); +} + #endif /* PAGETABLE_LEVELS > 3 */ #endif /* PAGETABLE_LEVELS > 2 */ diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 177b016..0e989a1 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -71,6 +71,7 @@ static inline void pud_clear(pud_t *pudp) { unsigned long pgd; + preempt_disable(); set_pud(pudp, __pud(0)); /* @@ -86,6 +87,7 @@ static inline void pud_clear(pud_t *pudp) if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) write_cr3(pgd); + preempt_enable(); } #ifdef CONFIG_SMP diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 01fd946..b323c3d 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -59,14 +59,20 @@ extern void set_pmd_pfn(unsigned long, unsigned long, 
pgprot_t); #define pte_offset_map_nested(dir, address) \ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \ pte_index((address))) +#define pte_offset_map_direct(dir, address) \ + ((pte_t *)kmap_atomic_pte_direct(pmd_page(*(dir)), __KM_PTE) + \ + pte_index((address))) #define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE) #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) +#define pte_unmap_direct(pte) kunmap_atomic_direct((pte), __KM_PTE) #else #define pte_offset_map(dir, address) \ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address))) #define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address)) +#define pte_offset_map_direct(dir, address) pte_offset_map((dir), (address)) #define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) +#define pte_unmap_direct(pte) do { } while (0) #endif /* Clear a kernel PTE and flush it from the TLB */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index c57a301..efc01ae 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -126,8 +126,10 @@ static inline int pgd_large(pgd_t pgd) { return 0; } /* x86-64 always has all page tables mapped. */ #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) #define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address)) -#define pte_unmap(pte) /* NOP */ -#define pte_unmap_nested(pte) /* NOP */ +#define pte_offset_map_direct(dir, address) pte_offset_kernel((dir), (address)) +#define pte_unmap(pte) do { } while (0) +#define pte_unmap_nested(pte) do { } while (0) +#define pte_unmap_direct(pte) do { } while (0) #define update_mmu_cache(vma, address, pte) do { } while (0) diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index ca7517d..92d67a6 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h @@ -44,14 +44,14 @@ struct rwsem_waiter; -extern asmregparm struct rw_semaphore * - rwsem_down_read_failed(struct rw_semaphore *sem); -extern asmregparm struct rw_semaphore * - rwsem_down_write_failed(struct rw_semaphore *sem); -extern asmregparm struct rw_semaphore * - rwsem_wake(struct rw_semaphore *); -extern asmregparm struct rw_semaphore * - rwsem_downgrade_wake(struct rw_semaphore *sem); +extern asmregparm struct rw_anon_semaphore * + rwsem_down_read_failed(struct rw_anon_semaphore *sem); +extern asmregparm struct rw_anon_semaphore * + rwsem_down_write_failed(struct rw_anon_semaphore *sem); +extern asmregparm struct rw_anon_semaphore * + rwsem_wake(struct rw_anon_semaphore *); +extern asmregparm struct rw_anon_semaphore * + rwsem_downgrade_wake(struct rw_anon_semaphore *sem); /* * the semaphore definition @@ -64,7 +64,7 @@ extern asmregparm struct rw_semaphore * #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) -struct rw_semaphore { +struct rw_anon_semaphore { signed long count; spinlock_t wait_lock; struct list_head wait_list; @@ -74,35 +74,34 @@ struct rw_semaphore { }; #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +# define __RWSEM_ANON_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } #else -# define __RWSEM_DEP_MAP_INIT(lockname) +# define __RWSEM_ANON_DEP_MAP_INIT(lockname) #endif - -#define __RWSEM_INITIALIZER(name) \ +#define __RWSEM_ANON_INITIALIZER(name) \ { \ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ LIST_HEAD_INIT((name).wait_list) 
__RWSEM_DEP_MAP_INIT(name) \ } -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) +#define DECLARE_ANON_RWSEM(name) \ + struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name) -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key); +extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name, + struct lock_class_key *key); -#define init_rwsem(sem) \ +#define init_anon_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ - __init_rwsem((sem), #sem, &__key); \ + __init_anon_rwsem((sem), #sem, &__key); \ } while (0) /* * lock for reading */ -static inline void __down_read(struct rw_semaphore *sem) +static inline void __down_read(struct rw_anon_semaphore *sem) { asm volatile("# beginning down_read\n\t" LOCK_PREFIX " incl (%%eax)\n\t" @@ -119,7 +118,7 @@ static inline void __down_read(struct rw_semaphore *sem) /* * trylock for reading -- returns 1 if successful, 0 if contention */ -static inline int __down_read_trylock(struct rw_semaphore *sem) +static inline int __down_read_trylock(struct rw_anon_semaphore *sem) { __s32 result, tmp; asm volatile("# beginning __down_read_trylock\n\t" @@ -141,7 +140,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) /* * lock for writing */ -static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) +static inline void +__down_write_nested(struct rw_anon_semaphore *sem, int subclass) { int tmp; @@ -160,7 +160,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) : "memory", "cc"); } -static inline void __down_write(struct rw_semaphore *sem) +static inline void __down_write(struct rw_anon_semaphore *sem) { __down_write_nested(sem, 0); } @@ -168,7 +168,7 @@ static inline void __down_write(struct rw_semaphore *sem) /* * trylock for writing -- returns 1 if successful, 0 if contention */ -static inline int __down_write_trylock(struct rw_semaphore *sem) +static inline int __down_write_trylock(struct rw_anon_semaphore *sem) { signed long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, @@ -181,7 +181,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) /* * unlock after reading */ -static inline void __up_read(struct rw_semaphore *sem) +static inline void __up_read(struct rw_anon_semaphore *sem) { __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; asm volatile("# beginning __up_read\n\t" @@ -199,7 +199,7 @@ static inline void __up_read(struct rw_semaphore *sem) /* * unlock after writing */ -static inline void __up_write(struct rw_semaphore *sem) +static inline void __up_write(struct rw_anon_semaphore *sem) { asm volatile("# beginning __up_write\n\t" " movl %2,%%edx\n\t" @@ -218,7 +218,7 @@ static inline void __up_write(struct rw_semaphore *sem) /* * downgrade write lock to read lock */ -static inline void __downgrade_write(struct rw_semaphore *sem) +static inline void __downgrade_write(struct rw_anon_semaphore *sem) { asm volatile("# beginning __downgrade_write\n\t" LOCK_PREFIX " addl %2,(%%eax)\n\t" @@ -235,7 +235,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) /* * implement atomic add functionality */ -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +static inline void rwsem_atomic_add(int delta, struct rw_anon_semaphore *sem) { asm volatile(LOCK_PREFIX "addl %1,%0" : "+m" (sem->count) @@ -245,7 +245,7 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) /* * implement exchange and add functionality */ -static inline 
int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +static inline int rwsem_atomic_update(int delta, struct rw_anon_semaphore *sem) { int tmp = delta; @@ -256,10 +256,54 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) return tmp + delta; } +static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem) +{ + return (sem->count != 0); +} + +#ifndef CONFIG_PREEMPT_RT + +struct rw_semaphore { + signed long count; + spinlock_t wait_lock; + struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + +#define __RWSEM_INITIALIZER(name) \ +{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \ + __RWSEM_DEP_MAP_INIT(name) } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +static inline void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) +{ + __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key); +} + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + + static inline int rwsem_is_locked(struct rw_semaphore *sem) { return (sem->count != 0); } +#endif #endif /* __KERNEL__ */ #endif /* _ASM_X86_RWSEM_H */ diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 4e77853..135097c 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -298,9 +298,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define __raw_spin_relax(lock) cpu_relax() +#define __raw_read_relax(lock) cpu_relax() +#define __raw_write_relax(lock) cpu_relax() /* The {read|write|spin}_lock() on x86 are full memory barriers. */ static inline void smp_mb__after_lock(void) { } diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index 20ca9c4..1c4277a 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h @@ -62,9 +62,9 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc) unsigned long long ns; unsigned long flags; - local_irq_save(flags); + raw_local_irq_save(flags); ns = __cycles_2_ns(cyc); - local_irq_restore(flags); + raw_local_irq_restore(flags); return ns; } diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 7f3eba0..1c77e81 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -7,6 +7,21 @@ #include #include +/* + * TLB-flush needs to be nonpreemptible on PREEMPT_RT due to the + * following complex race scenario: + * + * if the current task is lazy-TLB and does a TLB flush and + * gets preempted after the movl %%r3, %0 but before the + * movl %0, %%cr3 then its ->active_mm might change and it will + * install the wrong cr3 when it switches back. This is not a + * problem for the lazy-TLB task itself, but if the next task it + * switches to has an ->mm that is also the lazy-TLB task's + * new ->active_mm, then the scheduler will assume that cr3 is + * the new one, while we overwrote it with the old one. 
The result + * is the wrong cr3 in the new (non-lazy-TLB) task, which typically + * causes an infinite pagefault upon the next userspace access. + */ #ifdef CONFIG_PARAVIRT #include #else @@ -17,7 +32,9 @@ static inline void __native_flush_tlb(void) { + preempt_disable(); native_write_cr3(native_read_cr3()); + preempt_enable(); } static inline void __native_flush_tlb_global(void) @@ -95,6 +112,13 @@ static inline void __flush_tlb_one(unsigned long addr) static inline void flush_tlb_mm(struct mm_struct *mm) { + /* + * This is safe on PREEMPT_RT because if we preempt + * right after the check but before the __flush_tlb(), + * and if ->active_mm changes, then we might miss a + * TLB flush, but that TLB flush happened already when + * ->active_mm was changed: + */ if (mm == current->active_mm) __flush_tlb(); } diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 20e6a79..d2c6c93 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -212,9 +212,9 @@ extern int __get_user_bad(void); : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") #else #define __put_user_asm_u64(x, ptr, retval, errret) \ - __put_user_asm(x, ptr, retval, "q", "", "Zr", errret) + __put_user_asm(x, ptr, retval, "q", "", "er", errret) #define __put_user_asm_ex_u64(x, addr) \ - __put_user_asm_ex(x, addr, "q", "", "Zr") + __put_user_asm_ex(x, addr, "q", "", "er") #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) #endif diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 8cc6873..db24b21 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -88,11 +88,11 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size) ret, "l", "k", "ir", 4); return ret; case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, - ret, "q", "", "ir", 8); + ret, "q", "", "er", 8); return ret; case 10: __put_user_asm(*(u64 *)src, (u64 __user *)dst, - ret, "q", "", "ir", 10); + ret, "q", "", "er", 10); if (unlikely(ret)) return ret; asm("":::"memory"); @@ -101,12 +101,12 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size) return ret; case 16: __put_user_asm(*(u64 *)src, (u64 __user *)dst, - ret, "q", "", "ir", 16); + ret, "q", "", "er", 16); if (unlikely(ret)) return ret; asm("":::"memory"); __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, - ret, "q", "", "ir", 8); + ret, "q", "", "er", 8); return ret; default: return copy_user_generic((__force void *)dst, src, size); @@ -157,7 +157,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) ret, "q", "", "=r", 8); if (likely(!ret)) __put_user_asm(tmp, (u64 __user *)dst, - ret, "q", "", "ir", 8); + ret, "q", "", "er", 8); return ret; } default: diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index dc27a69..c536000 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h @@ -5,7 +5,7 @@ #include struct vsyscall_gtod_data { - seqlock_t lock; + atomic_seqlock_t lock; /* open coded 'struct timespec' */ time_t wall_time_sec; diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h index 133b40a..7a6aa68 100644 --- a/arch/x86/include/asm/xor_32.h +++ b/arch/x86/include/asm/xor_32.h @@ -865,7 +865,21 @@ static struct xor_block_template xor_block_pIII_sse = { #include #undef XOR_TRY_TEMPLATES -#define XOR_TRY_TEMPLATES \ +/* + * MMX/SSE ops disable preemption for long periods of time, + * so on PREEMPT_RT use the register-based ops only: + */ +#ifdef 
CONFIG_PREEMPT_RT +# define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_8regs_p); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_32regs_p); \ + } while (0) +# define XOR_SELECT_TEMPLATE(FASTEST) (FASTEST) +#else +# define XOR_TRY_TEMPLATES \ do { \ xor_speed(&xor_block_8regs); \ xor_speed(&xor_block_8regs_p); \ @@ -882,7 +896,8 @@ do { \ /* We force the use of the SSE xor block because it can write around L2. We may also be able to load into the L1 only depending on how the cpu deals with a load to a line that is being prefetched. */ -#define XOR_SELECT_TEMPLATE(FASTEST) \ +# define XOR_SELECT_TEMPLATE(FASTEST) \ (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) +#endif /* CONFIG_PREEMPT_RT */ #endif /* _ASM_X86_XOR_32_H */ diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 2284a48..0a0e813 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -73,8 +73,8 @@ */ int sis_apic_bug = -1; -static DEFINE_SPINLOCK(ioapic_lock); -static DEFINE_SPINLOCK(vector_lock); +static DEFINE_ATOMIC_SPINLOCK(ioapic_lock); +static DEFINE_ATOMIC_SPINLOCK(vector_lock); /* * # of IRQ routing registers @@ -413,7 +413,7 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg) struct irq_pin_list *entry; unsigned long flags; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); entry = cfg->irq_2_pin; for (;;) { unsigned int reg; @@ -425,14 +425,14 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg) reg = io_apic_read(entry->apic, 0x10 + pin*2); /* Is the remote IRR bit set? */ if (reg & IO_APIC_REDIR_REMOTE_IRR) { - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); return true; } if (!entry->next) break; entry = entry->next; } - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); return false; } @@ -446,10 +446,10 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) { union entry_union eu; unsigned long flags; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); return eu.entry; } @@ -472,9 +472,9 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) { unsigned long flags; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); __ioapic_write_entry(apic, pin, e); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); } /* @@ -487,10 +487,10 @@ static void ioapic_mask_entry(int apic, int pin) unsigned long flags; union entry_union eu = { .entry.mask = 1 }; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(apic, 0x10 + 2*pin, eu.w1); io_apic_write(apic, 0x11 + 2*pin, eu.w2); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); } /* @@ -622,9 +622,9 @@ static void mask_IO_APIC_irq_desc(struct irq_desc *desc) BUG_ON(!cfg); - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); __mask_IO_APIC_irq(cfg); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); } static void 
unmask_IO_APIC_irq_desc(struct irq_desc *desc) @@ -632,9 +632,9 @@ static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) struct irq_cfg *cfg = desc->chip_data; unsigned long flags; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); __unmask_IO_APIC_irq(cfg); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); } static void mask_IO_APIC_irq(unsigned int irq) @@ -1158,12 +1158,12 @@ void lock_vector_lock(void) /* Used to the online set of cpus does not change * during assign_irq_vector. */ - spin_lock(&vector_lock); + atomic_spin_lock(&vector_lock); } void unlock_vector_lock(void) { - spin_unlock(&vector_lock); + atomic_spin_unlock(&vector_lock); } static int @@ -1251,9 +1251,9 @@ assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) int err; unsigned long flags; - spin_lock_irqsave(&vector_lock, flags); + atomic_spin_lock_irqsave(&vector_lock, flags); err = __assign_irq_vector(irq, cfg, mask); - spin_unlock_irqrestore(&vector_lock, flags); + atomic_spin_unlock_irqrestore(&vector_lock, flags); return err; } @@ -1623,14 +1623,14 @@ __apicdebuginit(void) print_IO_APIC(void) for (apic = 0; apic < nr_ioapics; apic++) { - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(apic, 0); reg_01.raw = io_apic_read(apic, 1); if (reg_01.bits.version >= 0x10) reg_02.raw = io_apic_read(apic, 2); if (reg_01.bits.version >= 0x20) reg_03.raw = io_apic_read(apic, 3); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); printk("\n"); printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid); @@ -1856,7 +1856,7 @@ __apicdebuginit(void) print_PIC(void) printk(KERN_DEBUG "\nprinting PIC contents\n"); - spin_lock_irqsave(&i8259A_lock, flags); + atomic_spin_lock_irqsave(&i8259A_lock, flags); v = inb(0xa1) << 8 | inb(0x21); printk(KERN_DEBUG "... PIC IMR: %04x\n", v); @@ -1870,7 +1870,7 @@ __apicdebuginit(void) print_PIC(void) outb(0x0a,0xa0); outb(0x0a,0x20); - spin_unlock_irqrestore(&i8259A_lock, flags); + atomic_spin_unlock_irqrestore(&i8259A_lock, flags); printk(KERN_DEBUG "... 
PIC ISR: %04x\n", v); @@ -1909,9 +1909,9 @@ void __init enable_IO_APIC(void) * The number of IO-APIC IRQ registers (== #pins): */ for (apic = 0; apic < nr_ioapics; apic++) { - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); reg_01.raw = io_apic_read(apic, 1); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); nr_ioapic_registers[apic] = reg_01.bits.entries+1; } for(apic = 0; apic < nr_ioapics; apic++) { @@ -2045,9 +2045,9 @@ static void __init setup_ioapic_ids_from_mpc(void) for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { /* Read the register 0 value */ - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(apic_id, 0); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); old_id = mp_ioapics[apic_id].apicid; @@ -2106,16 +2106,16 @@ static void __init setup_ioapic_ids_from_mpc(void) mp_ioapics[apic_id].apicid); reg_00.bits.ID = mp_ioapics[apic_id].apicid; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(apic_id, 0, reg_00.raw); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); /* * Sanity check */ - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(apic_id, 0); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); if (reg_00.bits.ID != mp_ioapics[apic_id].apicid) printk("could not set ID!\n"); else @@ -2164,8 +2164,10 @@ static int __init timer_irq_works(void) */ /* jiffies wrap? */ - if (time_after(jiffies, t1 + 4)) + if (time_after(jiffies, t1 + 4) && + time_before(jiffies, t1 + 16)) return 1; + return 0; } @@ -2198,7 +2200,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq) unsigned long flags; struct irq_cfg *cfg; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); if (irq < NR_IRQS_LEGACY) { disable_8259A_irq(irq); if (i8259A_irq_pending(irq)) @@ -2206,7 +2208,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq) } cfg = irq_cfg(irq); __unmask_IO_APIC_irq(cfg); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); return was_pending; } @@ -2218,9 +2220,9 @@ static int ioapic_retrigger_irq(unsigned int irq) struct irq_cfg *cfg = irq_cfg(irq); unsigned long flags; - spin_lock_irqsave(&vector_lock, flags); + atomic_spin_lock_irqsave(&vector_lock, flags); apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); - spin_unlock_irqrestore(&vector_lock, flags); + atomic_spin_unlock_irqrestore(&vector_lock, flags); return 1; } @@ -2333,7 +2335,7 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) irq = desc->irq; cfg = desc->chip_data; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); dest = set_desc_affinity(desc, mask); if (dest != BAD_APICID) { /* Only the high 8 bits are valid. 
*/ @@ -2341,7 +2343,7 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) __target_IO_APIC_irq(irq, dest, cfg); ret = 0; } - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); return ret; } @@ -2454,7 +2456,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) continue; cfg = irq_cfg(irq); - spin_lock(&desc->lock); + atomic_spin_lock(&desc->lock); if (!cfg->move_cleanup_count) goto unlock; @@ -2476,7 +2478,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) __get_cpu_var(vector_irq)[vector] = -1; cfg->move_cleanup_count--; unlock: - spin_unlock(&desc->lock); + atomic_spin_unlock(&desc->lock); } irq_exit(); @@ -2526,7 +2528,8 @@ static void ack_apic_level(unsigned int irq) irq_complete_move(&desc); #ifdef CONFIG_GENERIC_PENDING_IRQ /* If we are moving the irq we need to mask it */ - if (unlikely(desc->status & IRQ_MOVE_PENDING)) { + if (unlikely(desc->status & IRQ_MOVE_PENDING) && + !(desc->status & IRQ_INPROGRESS)) { do_unmask_irq = 1; mask_IO_APIC_irq_desc(desc); } @@ -2597,14 +2600,23 @@ static void ack_apic_level(unsigned int irq) move_masked_irq(irq); unmask_IO_APIC_irq_desc(desc); } +#if (defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)) && \ + defined(CONFIG_PREEMPT_HARDIRQS) + /* + * With threaded interrupts, we always have IRQ_INPROGRESS + * when acking. + */ + else if (unlikely(desc->status & IRQ_MOVE_PENDING)) + move_masked_irq(irq); +#endif #ifdef CONFIG_X86_32 if (!(v & (1 << (i & 0x1f)))) { atomic_inc(&irq_mis_count); - spin_lock(&ioapic_lock); + atomic_spin_lock(&ioapic_lock); __mask_and_edge_IO_APIC_irq(cfg); __unmask_and_level_IO_APIC_irq(cfg); - spin_unlock(&ioapic_lock); + atomic_spin_unlock(&ioapic_lock); } #endif } @@ -2638,9 +2650,9 @@ eoi_ioapic_irq(struct irq_desc *desc) irq = desc->irq; cfg = desc->chip_data; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); __eoi_ioapic_irq(irq, cfg); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); } static void ir_ack_apic_edge(unsigned int irq) @@ -3116,13 +3128,13 @@ static int ioapic_resume(struct sys_device *dev) data = container_of(dev, struct sysfs_ioapic_data, dev); entry = data->entry; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(dev->id, 0); if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) { reg_00.bits.ID = mp_ioapics[dev->id].apicid; io_apic_write(dev->id, 0, reg_00.raw); } - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); for (i = 0; i < nr_ioapic_registers[dev->id]; i++) ioapic_write_entry(dev->id, i, entry[i]); @@ -3186,7 +3198,6 @@ unsigned int create_irq_nr(unsigned int irq_want, int node) if (irq_want < nr_irqs_gsi) irq_want = nr_irqs_gsi; - spin_lock_irqsave(&vector_lock, flags); for (new = irq_want; new < nr_irqs; new++) { desc_new = irq_to_desc_alloc_node(new, node); if (!desc_new) { @@ -3198,13 +3209,14 @@ unsigned int create_irq_nr(unsigned int irq_want, int node) if (cfg_new->vector != 0) continue; + atomic_spin_lock_irqsave(&vector_lock, flags); desc_new = move_irq_desc(desc_new, node); if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) irq = new; + atomic_spin_unlock_irqrestore(&vector_lock, flags); break; } - spin_unlock_irqrestore(&vector_lock, flags); if (irq > 0) { dynamic_irq_init(irq); @@ -3245,9 +3257,9 @@ void destroy_irq(unsigned int irq) desc->chip_data = cfg; 
free_irte(irq); - spin_lock_irqsave(&vector_lock, flags); + atomic_spin_lock_irqsave(&vector_lock, flags); __clear_irq_vector(irq, cfg); - spin_unlock_irqrestore(&vector_lock, flags); + atomic_spin_unlock_irqrestore(&vector_lock, flags); } /* @@ -3775,10 +3787,10 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, if (err != 0) return err; - spin_lock_irqsave(&vector_lock, flags); + atomic_spin_lock_irqsave(&vector_lock, flags); set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, irq_name); - spin_unlock_irqrestore(&vector_lock, flags); + atomic_spin_unlock_irqrestore(&vector_lock, flags); mmr_value = 0; entry = (struct uv_IO_APIC_route_entry *)&mmr_value; @@ -3822,9 +3834,9 @@ int __init io_apic_get_redir_entries (int ioapic) union IO_APIC_reg_01 reg_01; unsigned long flags; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); reg_01.raw = io_apic_read(ioapic, 1); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); return reg_01.bits.entries; } @@ -3965,9 +3977,9 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) if (physids_empty(apic_id_map)) apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map); - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); reg_00.raw = io_apic_read(ioapic, 0); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); if (apic_id >= get_physical_broadcast()) { printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " @@ -4001,10 +4013,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) if (reg_00.bits.ID != apic_id) { reg_00.bits.ID = apic_id; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); io_apic_write(ioapic, 0, reg_00.raw); reg_00.raw = io_apic_read(ioapic, 0); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); /* Sanity check */ if (reg_00.bits.ID != apic_id) { @@ -4025,9 +4037,9 @@ int __init io_apic_get_version(int ioapic) union IO_APIC_reg_01 reg_01; unsigned long flags; - spin_lock_irqsave(&ioapic_lock, flags); + atomic_spin_lock_irqsave(&ioapic_lock, flags); reg_01.raw = io_apic_read(ioapic, 1); - spin_unlock_irqrestore(&ioapic_lock, flags); + atomic_spin_unlock_irqrestore(&ioapic_lock, flags); return reg_01.bits.version; } diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index b3025b4..f6755e5 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c @@ -90,7 +90,9 @@ static inline unsigned int get_timer_irqs(int cpu) */ static __init void nmi_cpu_busy(void *data) { +#ifndef CONFIG_PREEMPT_RT local_irq_enable_in_hardirq(); +#endif /* * Intentionally don't use cpu_relax here. This is * to make sure that the performance counter really ticks, @@ -416,12 +418,12 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) /* We can be called before check_nmi_watchdog, hence NULL check. 
*/ if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) { - static DEFINE_SPINLOCK(lock); /* Serialise the printks */ + static DEFINE_ATOMIC_SPINLOCK(lock); /* Serialise the printks */ - spin_lock(&lock); + atomic_spin_lock(&lock); printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); dump_stack(); - spin_unlock(&lock); + atomic_spin_unlock(&lock); cpumask_clear_cpu(cpu, backtrace_mask); } diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 79302e9..5dc9baa 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -1216,7 +1216,7 @@ static void reinit_timer(void) #ifdef INIT_TIMER_AFTER_SUSPEND unsigned long flags; - spin_lock_irqsave(&i8253_lock, flags); + atomic_spin_lock_irqsave(&i8253_lock, flags); /* set the clock to HZ */ outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */ udelay(10); @@ -1224,7 +1224,7 @@ static void reinit_timer(void) udelay(10); outb_pit(LATCH >> 8, PIT_CH0); /* MSB */ udelay(10); - spin_unlock_irqrestore(&i8253_lock, flags); + atomic_spin_unlock_irqrestore(&i8253_lock, flags); #endif } diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 28e5f59..e2485b0 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -356,7 +356,7 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) #endif #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) /* check CPU config space for extended APIC ID */ - if (c->x86 >= 0xf) { + if (cpu_has_apic && c->x86 >= 0xf) { unsigned int val; val = read_pci_config(0, 24, 0, 0x68); if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18))) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f1961c0..53707e5 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1004,7 +1004,9 @@ DEFINE_PER_CPU(unsigned int, irq_count) = -1; */ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, +#if DEBUG_STACK > 0 [DEBUG_STACK - 1] = DEBUG_STKSZ +#endif }; static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 484c1e5..1cfb623 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1692,17 +1692,15 @@ static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr, const char *buf, size_t siz) { char *p; - int len; strncpy(mce_helper, buf, sizeof(mce_helper)); mce_helper[sizeof(mce_helper)-1] = 0; - len = strlen(mce_helper); p = strchr(mce_helper, '\n'); - if (*p) + if (p) *p = 0; - return len; + return strlen(mce_helper) + !!p; } static ssize_t set_ignore_ce(struct sys_device *s, diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 0543f69..d486197 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -549,7 +549,7 @@ static unsigned long set_mtrr_state(void) static unsigned long cr4 = 0; -static DEFINE_SPINLOCK(set_atomicity_lock); +static DEFINE_ATOMIC_SPINLOCK(set_atomicity_lock); /* * Since we are disabling the cache don't allow any interrupts - they @@ -566,7 +566,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock) for this CPU while the MTRRs are changed, but changing this requires more invasive changes to the way the kernel boots */ - spin_lock(&set_atomicity_lock); + atomic_spin_lock(&set_atomicity_lock); /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. 
*/ cr0 = read_cr0() | X86_CR0_CD; @@ -603,7 +603,7 @@ static void post_set(void) __releases(set_atomicity_lock) /* Restore value of CR4 */ if ( cpu_has_pge ) write_cr4(cr4); - spin_unlock(&set_atomicity_lock); + atomic_spin_unlock(&set_atomicity_lock); } static void generic_set_all(void) diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index bca5fba..ffb9886 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -99,6 +99,12 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, } +#if defined(CONFIG_DEBUG_STACKOVERFLOW) && defined(CONFIG_EVENT_TRACE) +extern unsigned long worst_stack_left; +#else +# define worst_stack_left -1L +#endif + void show_registers(struct pt_regs *regs) { int i; diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 54b0a32..5a11d2a 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -21,10 +21,14 @@ static char x86_stack_ids[][8] = { +#if DEBUG_STACK > 0 [DEBUG_STACK - 1] = "#DB", +#endif [NMI_STACK - 1] = "NMI", [DOUBLEFAULT_STACK - 1] = "#DF", +#if STACKFAULT_STACK > 0 [STACKFAULT_STACK - 1] = "#SS", +#endif [MCE_STACK - 1] = "#MC", #if DEBUG_STKSZ > EXCEPTION_STKSZ [N_EXCEPTION_STACKS ... diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 335f049..8b60080 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -59,7 +59,7 @@ static void early_vga_write(struct console *con, const char *str, unsigned n) static struct console early_vga_console = { .name = "earlyvga", .write = early_vga_write, - .flags = CON_PRINTBUFFER, + .flags = CON_PRINTBUFFER | CON_ATOMIC, .index = -1, }; @@ -156,7 +156,7 @@ static __init void early_serial_init(char *s) static struct console early_serial_console = { .name = "earlyser", .write = early_serial_write, - .flags = CON_PRINTBUFFER, + .flags = CON_PRINTBUFFER | CON_ATOMIC, .index = -1, }; @@ -881,7 +881,7 @@ static int __initdata early_console_initialized; asmlinkage void early_printk(const char *fmt, ...) { - char buf[512]; + static char buf[512]; int n; va_list ap; diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index c097e7d..f86fc3b 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -371,13 +371,13 @@ END(ret_from_exception) ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? - jnz restore_all + jnz restore_nocheck need_resched: movl TI_flags(%ebp), %ecx # need_resched set ? testb $_TIF_NEED_RESCHED, %cl - jz restore_all + jz restore_nocheck testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? - jz restore_all + jz restore_nocheck call preempt_schedule_irq jmp need_resched END(resume_kernel) @@ -627,12 +627,9 @@ work_pending: testb $_TIF_NEED_RESCHED, %cl jz work_notifysig work_resched: - call schedule + call __schedule LOCKDEP_SYS_EXIT - DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt - # setting need_resched or sigpending - # between sampling and the iret - TRACE_IRQS_OFF + movl TI_flags(%ebp), %ecx andl $_TIF_WORK_MASK, %ecx # is there any work to be done other # than syscall tracing? 
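
[ Editor's note: illustrative sketch, not part of the patch. A recurring pattern in the hunks above is the conversion of locks taken from hard interrupt, timer or early-boot context (i8253_lock, i8259A_lock, ioapic_lock, vector_lock, the MTRR set_atomicity_lock) from spinlock_t to atomic_spinlock_t, because on PREEMPT_RT a plain spinlock_t becomes a sleeping lock and may not be acquired with interrupts disabled. Assuming the atomic_spin_lock_* API and the asm/i8253.h declarations introduced earlier in this series, the resulting usage pattern looks roughly like the sketch below; the hypothetical helper name is for illustration only.

	/*
	 * Sketch only: reprogram the PIT from a context that must never
	 * sleep (e.g. with interrupts already disabled).  The critical
	 * section is identical to the old spinlock_t version; only the
	 * lock type changes, which keeps the section truly spinning and
	 * non-preemptible on PREEMPT_RT as well.
	 */
	static void example_program_pit(unsigned int count)
	{
		unsigned long flags;

		atomic_spin_lock_irqsave(&i8253_lock, flags);
		outb_pit(0x34, PIT_MODE);		/* binary, mode 2, LSB/MSB, ch 0 */
		outb_pit(count & 0xff, PIT_CH0);	/* LSB */
		outb_pit(count >> 8, PIT_CH0);		/* MSB */
		atomic_spin_unlock_irqrestore(&i8253_lock, flags);
	}

  Locks that are only ever taken from preemptible process context are left as spinlock_t and so become sleeping locks on PREEMPT_RT; only the interrupt-path locks shown in these hunks need the atomic variant. ]
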
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 70eaa85..4d7255c 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -30,7 +30,11 @@ static void __init zap_identity_mappings(void) { pgd_t *pgd = pgd_offset_k(0UL); pgd_clear(pgd); - __flush_tlb_all(); + /* + * preempt_disable/enable does not work this early in the + * bootup yet: + */ + write_cr3(read_cr3()); } /* Don't add a printk in there. printk relies on the PDA which is not initialized diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 8663afb..8861a4b 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -594,6 +594,7 @@ ignore_int: call dump_stack addl $(5*4),%esp + call dump_stack popl %ds popl %es popl %edx diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 5cf36c0..84d4433 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -16,7 +16,7 @@ #include #include -DEFINE_SPINLOCK(i8253_lock); +DEFINE_ATOMIC_SPINLOCK(i8253_lock); EXPORT_SYMBOL(i8253_lock); #ifdef CONFIG_X86_32 @@ -39,7 +39,7 @@ struct clock_event_device *global_clock_event; static void init_pit_timer(enum clock_event_mode mode, struct clock_event_device *evt) { - spin_lock(&i8253_lock); + atomic_spin_lock(&i8253_lock); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: @@ -70,7 +70,7 @@ static void init_pit_timer(enum clock_event_mode mode, /* Nothing to do here */ break; } - spin_unlock(&i8253_lock); + atomic_spin_unlock(&i8253_lock); } /* @@ -80,10 +80,10 @@ static void init_pit_timer(enum clock_event_mode mode, */ static int pit_next_event(unsigned long delta, struct clock_event_device *evt) { - spin_lock(&i8253_lock); + atomic_spin_lock(&i8253_lock); outb_pit(delta & 0xff , PIT_CH0); /* LSB */ outb_pit(delta >> 8 , PIT_CH0); /* MSB */ - spin_unlock(&i8253_lock); + atomic_spin_unlock(&i8253_lock); return 0; } @@ -138,7 +138,7 @@ static cycle_t pit_read(struct clocksource *cs) int count; u32 jifs; - spin_lock_irqsave(&i8253_lock, flags); + atomic_spin_lock_irqsave(&i8253_lock, flags); /* * Although our caller may have the read side of xtime_lock, * this is now a seqlock, and we are cheating in this routine @@ -184,7 +184,7 @@ static cycle_t pit_read(struct clocksource *cs) old_count = count; old_jifs = jifs; - spin_unlock_irqrestore(&i8253_lock, flags); + atomic_spin_unlock_irqrestore(&i8253_lock, flags); count = (LATCH - 1) - count; diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index df89102..6fbe669 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -32,7 +32,7 @@ */ static int i8259A_auto_eoi; -DEFINE_SPINLOCK(i8259A_lock); +DEFINE_ATOMIC_SPINLOCK(i8259A_lock); static void mask_and_ack_8259A(unsigned int); struct irq_chip i8259A_chip = { @@ -68,13 +68,13 @@ void disable_8259A_irq(unsigned int irq) unsigned int mask = 1 << irq; unsigned long flags; - spin_lock_irqsave(&i8259A_lock, flags); + atomic_spin_lock_irqsave(&i8259A_lock, flags); cached_irq_mask |= mask; if (irq & 8) outb(cached_slave_mask, PIC_SLAVE_IMR); else outb(cached_master_mask, PIC_MASTER_IMR); - spin_unlock_irqrestore(&i8259A_lock, flags); + atomic_spin_unlock_irqrestore(&i8259A_lock, flags); } void enable_8259A_irq(unsigned int irq) @@ -82,13 +82,13 @@ void enable_8259A_irq(unsigned int irq) unsigned int mask = ~(1 << irq); unsigned long flags; - spin_lock_irqsave(&i8259A_lock, flags); + atomic_spin_lock_irqsave(&i8259A_lock, flags); cached_irq_mask &= mask; if (irq & 8) outb(cached_slave_mask, PIC_SLAVE_IMR); else outb(cached_master_mask, 
PIC_MASTER_IMR); - spin_unlock_irqrestore(&i8259A_lock, flags); + atomic_spin_unlock_irqrestore(&i8259A_lock, flags); } int i8259A_irq_pending(unsigned int irq) @@ -97,12 +97,12 @@ int i8259A_irq_pending(unsigned int irq) unsigned long flags; int ret; - spin_lock_irqsave(&i8259A_lock, flags); + atomic_spin_lock_irqsave(&i8259A_lock, flags); if (irq < 8) ret = inb(PIC_MASTER_CMD) & mask; else ret = inb(PIC_SLAVE_CMD) & (mask >> 8); - spin_unlock_irqrestore(&i8259A_lock, flags); + atomic_spin_unlock_irqrestore(&i8259A_lock, flags); return ret; } @@ -150,7 +150,7 @@ static void mask_and_ack_8259A(unsigned int irq) unsigned int irqmask = 1 << irq; unsigned long flags; - spin_lock_irqsave(&i8259A_lock, flags); + atomic_spin_lock_irqsave(&i8259A_lock, flags); /* * Lightweight spurious IRQ detection. We do not want * to overdo spurious IRQ handling - it's usually a sign @@ -168,6 +168,8 @@ static void mask_and_ack_8259A(unsigned int irq) */ if (cached_irq_mask & irqmask) goto spurious_8259A_irq; + if (irq & 8) + outb(0x60+(irq&7), PIC_SLAVE_CMD); /* 'Specific EOI' to slave */ cached_irq_mask |= irqmask; handle_real_irq: @@ -183,7 +185,7 @@ handle_real_irq: outb(cached_master_mask, PIC_MASTER_IMR); outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ } - spin_unlock_irqrestore(&i8259A_lock, flags); + atomic_spin_unlock_irqrestore(&i8259A_lock, flags); return; spurious_8259A_irq: @@ -285,24 +287,24 @@ void mask_8259A(void) { unsigned long flags; - spin_lock_irqsave(&i8259A_lock, flags); + atomic_spin_lock_irqsave(&i8259A_lock, flags); outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ - spin_unlock_irqrestore(&i8259A_lock, flags); + atomic_spin_unlock_irqrestore(&i8259A_lock, flags); } void unmask_8259A(void) { unsigned long flags; - spin_lock_irqsave(&i8259A_lock, flags); + atomic_spin_lock_irqsave(&i8259A_lock, flags); outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ - spin_unlock_irqrestore(&i8259A_lock, flags); + atomic_spin_unlock_irqrestore(&i8259A_lock, flags); } void init_8259A(int auto_eoi) @@ -311,7 +313,7 @@ void init_8259A(int auto_eoi) i8259A_auto_eoi = auto_eoi; - spin_lock_irqsave(&i8259A_lock, flags); + atomic_spin_lock_irqsave(&i8259A_lock, flags); outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ @@ -328,10 +330,10 @@ void init_8259A(int auto_eoi) /* 8259A-1 (the master) has a slave on IR2 */ outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); - if (auto_eoi) /* master does Auto EOI */ - outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); - else /* master expects normal EOI */ - outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR); + if (!auto_eoi) /* master expects normal EOI */ + outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR); + else /* master does Auto EOI */ + outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); outb_pic(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */ @@ -356,5 +358,5 @@ void init_8259A(int auto_eoi) outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ - spin_unlock_irqrestore(&i8259A_lock, flags); + atomic_spin_unlock_irqrestore(&i8259A_lock, flags); } diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index b0cdde6..c51d8e6 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -149,7 +149,7 @@ int show_interrupts(struct seq_file 
*p, void *v) if (!desc) return 0; - spin_lock_irqsave(&desc->lock, flags); + atomic_spin_lock_irqsave(&desc->lock, flags); for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; @@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); out: - spin_unlock_irqrestore(&desc->lock, flags); + atomic_spin_unlock_irqrestore(&desc->lock, flags); return 0; } diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 977d8b4..1aa5228 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -81,12 +81,12 @@ void fixup_irqs(void) continue; /* interrupt's are disabled at this point */ - spin_lock(&desc->lock); + atomic_spin_lock(&desc->lock); affinity = desc->affinity; if (!irq_has_action(irq) || cpumask_equal(affinity, cpu_online_mask)) { - spin_unlock(&desc->lock); + atomic_spin_unlock(&desc->lock); continue; } @@ -106,7 +106,7 @@ void fixup_irqs(void) if (desc->chip->unmask) desc->chip->unmask(irq); - spin_unlock(&desc->lock); + atomic_spin_unlock(&desc->lock); if (break_affinity && set_affinity) printk("Broke affinity for irq %i\n", irq); diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 696f0e4..49dcf16 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -72,6 +72,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id) static struct irqaction fpu_irq = { .handler = math_error_irq, .name = "fpu", + .flags = IRQF_NODELAY, }; #endif @@ -81,6 +82,7 @@ static struct irqaction fpu_irq = { static struct irqaction irq2 = { .handler = no_action, .name = "cascade", + .flags = IRQF_NODELAY, }; DEFINE_PER_CPU(vector_irq_t, vector_irq) = { @@ -187,7 +189,7 @@ static void __init apic_intr_init(void) #ifdef CONFIG_X86_THERMAL_VECTOR alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); #endif -#ifdef CONFIG_X86_THRESHOLD +#ifdef CONFIG_X86_MCE_THRESHOLD alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); #endif #if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 7b5169d..d23c755 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -454,7 +454,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, /* Boost up -- we can execute copied instructions directly */ reset_current_kprobe(); regs->ip = (unsigned long)p->ainsn.insn; - preempt_enable_no_resched(); + preempt_enable(); return; } #endif @@ -480,7 +480,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, arch_disarm_kprobe(p); regs->ip = (unsigned long)p->addr; reset_current_kprobe(); - preempt_enable_no_resched(); + preempt_enable(); break; #endif case KPROBE_HIT_ACTIVE: @@ -576,7 +576,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) } } /* else: not a kprobe fault; let the kernel handle it */ - preempt_enable_no_resched(); + preempt_enable(); return 0; } @@ -876,7 +876,7 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs) } reset_current_kprobe(); out: - preempt_enable_no_resched(); + preempt_enable(); /* * if somebody else is singlestepping across a probe point, flags @@ -910,7 +910,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) restore_previous_kprobe(kcb); else reset_current_kprobe(); - preempt_enable_no_resched(); + preempt_enable(); break; case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: @@ -1051,7 +1051,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 
memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp), kcb->jprobes_stack, MIN_STACK_SIZE(kcb->jprobe_saved_sp)); - preempt_enable_no_resched(); + preempt_enable(); return 1; } return 0; diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c index 846510b..2a62d84 100644 --- a/arch/x86/kernel/mfgpt_32.c +++ b/arch/x86/kernel/mfgpt_32.c @@ -347,7 +347,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id) static struct irqaction mfgptirq = { .handler = mfgpt_tick, - .flags = IRQF_DISABLED | IRQF_NOBALANCING, + .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, .name = "mfgpt-timer" }; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 70ec9b9..5611ed6 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -422,6 +422,20 @@ struct pv_apic_ops pv_apic_ops = { #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) #endif +#ifdef CONFIG_HIGHPTE +/* + * kmap_atomic() might be an inline or a macro: + */ +static void *kmap_atomic_func(struct page *page, enum km_type idx) +{ + return kmap_atomic(page, idx); +} +static void *kmap_atomic_direct_func(struct page *page, enum km_type idx) +{ + return kmap_atomic_direct(page, idx); +} +#endif + struct pv_mmu_ops pv_mmu_ops = { #ifndef CONFIG_X86_64 .pagetable_setup_start = native_pagetable_setup_start, @@ -462,7 +476,8 @@ struct pv_mmu_ops pv_mmu_ops = { .ptep_modify_prot_commit = __ptep_modify_prot_commit, #ifdef CONFIG_HIGHPTE - .kmap_atomic_pte = kmap_atomic, + .kmap_atomic_pte = kmap_atomic_func, + .kmap_atomic_pte_direct = kmap_atomic_direct_func, #endif #if PAGETABLE_LEVELS >= 3 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 59f4524..b9e7a3f 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -105,7 +105,6 @@ void cpu_idle(void) tick_nohz_stop_sched_tick(1); while (!need_resched()) { - check_pgt_cache(); rmb(); if (cpu_is_offline(cpu)) @@ -117,10 +116,12 @@ void cpu_idle(void) pm_idle(); start_critical_timings(); } + local_irq_disable(); tick_nohz_restart_sched_tick(); - preempt_enable_no_resched(); - schedule(); + __preempt_enable_no_resched(); + __schedule(); preempt_disable(); + local_irq_enable(); } } @@ -162,8 +163,10 @@ void __show_regs(struct pt_regs *regs, int all) regs->ax, regs->bx, regs->cx, regs->dx); printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", regs->si, regs->di, regs->bp, sp); - printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", - (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss); + printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x" + " preempt:%08x\n", + (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, + preempt_count()); if (!all) return; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index ebefb54..c8d0ece 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -152,9 +152,11 @@ void cpu_idle(void) } tick_nohz_restart_sched_tick(); - preempt_enable_no_resched(); - schedule(); + local_irq_disable(); + __preempt_enable_no_resched(); + __schedule(); preempt_disable(); + local_irq_enable(); } } diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index d2d1ce8..508e982 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -249,6 +249,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"), }, }, + { /* Handle problems with rebooting on CompuLab SBC-FITPC2 */ + .callback = set_bios_reboot, + .ident = "CompuLab SBC-FITPC2", + .matches = { + 
DMI_MATCH(DMI_SYS_VENDOR, "CompuLab"), + DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"), + }, + }, { } }; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index de2cab1..63f32d2 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -672,6 +672,19 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"), }, }, + { + /* + * AMI BIOS with low memory corruption was found on the Intel DG45ID board. + * It has a different DMI_BIOS_VENDOR = "Intel Corp."; for now we will + * match only DMI_BOARD_NAME and see if there are more bad products + * with this vendor. + */ + .callback = dmi_low_memory_corruption, + .ident = "AMI BIOS", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "DG45ID"), + }, + }, #endif {} }; diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 4c57875..5777895 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -782,6 +782,13 @@ static void do_signal(struct pt_regs *regs) int signr; sigset_t *oldset; +#ifdef CONFIG_PREEMPT_RT + /* + * Fully-preemptible kernel does not need interrupts disabled: + */ + local_irq_enable(); + preempt_check_resched(); +#endif /* * We want the common case to go fast, which is why we may in certain * cases get here from kernel mode. Just return without doing anything diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index ec1de97..a83e38d 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -120,6 +120,16 @@ static void native_smp_send_reschedule(int cpu) apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); } +/* + * This function sends a 'reschedule' IPI to all other CPUs. + * It is used when RT tasks are starving and other CPUs + * might be able to run them: + */ +void smp_send_reschedule_allbutself(void) +{ + apic->send_IPI_allbutself(RESCHEDULE_VECTOR); +} + void native_send_call_func_single_ipi(int cpu) { apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR); } diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c index 5c5d87f..9e2cb0b 100644 --- a/arch/x86/kernel/time_32.c +++ b/arch/x86/kernel/time_32.c @@ -84,11 +84,11 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) * manually to deassert NMI lines for the watchdog if run * on an 82489DX-based system. */ - spin_lock(&i8259A_lock); + atomic_spin_lock(&i8259A_lock); outb(0x0c, PIC_MASTER_OCW3); /* Ack the IRQ; AEOI will end it automatically.
*/ inb(PIC_MASTER_POLL); - spin_unlock(&i8259A_lock); + atomic_spin_unlock(&i8259A_lock); } #endif diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5204332..20ee0b1 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -91,9 +91,10 @@ static inline void conditional_sti(struct pt_regs *regs) local_irq_enable(); } -static inline void preempt_conditional_sti(struct pt_regs *regs) +static inline void preempt_conditional_sti(struct pt_regs *regs, int stack) { - inc_preempt_count(); + if (stack) + inc_preempt_count(); if (regs->flags & X86_EFLAGS_IF) local_irq_enable(); } @@ -104,11 +105,12 @@ static inline void conditional_cli(struct pt_regs *regs) local_irq_disable(); } -static inline void preempt_conditional_cli(struct pt_regs *regs) +static inline void preempt_conditional_cli(struct pt_regs *regs, int stack) { if (regs->flags & X86_EFLAGS_IF) local_irq_disable(); - dec_preempt_count(); + if (stack) + dec_preempt_count(); } #ifdef CONFIG_X86_32 @@ -235,9 +237,9 @@ dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) if (notify_die(DIE_TRAP, "stack segment", regs, error_code, 12, SIGBUS) == NOTIFY_STOP) return; - preempt_conditional_sti(regs); + preempt_conditional_sti(regs, STACKFAULT_STACK); do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL); - preempt_conditional_cli(regs); + preempt_conditional_cli(regs, STACKFAULT_STACK); } dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) @@ -473,9 +475,9 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) return; #endif - preempt_conditional_sti(regs); + preempt_conditional_sti(regs, DEBUG_STACK); do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); - preempt_conditional_cli(regs); + preempt_conditional_cli(regs, DEBUG_STACK); } #ifdef CONFIG_X86_64 @@ -552,7 +554,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) return; /* It's safe to allow irq's after DR6 has been saved */ - preempt_conditional_sti(regs); + preempt_conditional_sti(regs, DEBUG_STACK); /* Mask out spurious debug traps due to lazy DR7 setting */ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { @@ -587,7 +589,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) */ clear_dr7: set_debugreg(0, 7); - preempt_conditional_cli(regs); + preempt_conditional_cli(regs, DEBUG_STACK); return; #ifdef CONFIG_X86_32 @@ -602,7 +604,7 @@ debug_vm86: clear_TF_reenable: set_tsk_thread_flag(tsk, TIF_SINGLESTEP); regs->flags &= ~X86_EFLAGS_TF; - preempt_conditional_cli(regs); + preempt_conditional_cli(regs, DEBUG_STACK); return; } diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 027b5b4..76315a4 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -104,6 +104,7 @@ static __cpuinit void check_tsc_warp(void) */ void __cpuinit check_tsc_sync_source(int cpu) { + unsigned long flags; int cpus = 2; /* @@ -129,8 +130,11 @@ void __cpuinit check_tsc_sync_source(int cpu) /* * Wait for the target to arrive: */ + local_save_flags(flags); + local_irq_enable(); while (atomic_read(&start_count) != cpus-1) cpu_relax(); + local_irq_restore(flags); /* * Trigger the target to continue into the measurement too: */ diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index 31ffc24..24e6d2b 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c @@ -581,7 +581,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id) struct irq_desc *desc; 
unsigned long flags; - spin_lock_irqsave(&i8259A_lock, flags); + atomic_spin_lock_irqsave(&i8259A_lock, flags); /* Find out what's interrupting in the PIIX4 master 8259 */ outb(0x0c, 0x20); /* OCW3 Poll command */ @@ -618,7 +618,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id) outb(0x60 + realirq, 0x20); } - spin_unlock_irqrestore(&i8259A_lock, flags); + atomic_spin_unlock_irqrestore(&i8259A_lock, flags); desc = irq_to_desc(realirq); @@ -636,18 +636,20 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id) return IRQ_HANDLED; out_unlock: - spin_unlock_irqrestore(&i8259A_lock, flags); + atomic_spin_unlock_irqrestore(&i8259A_lock, flags); return IRQ_NONE; } static struct irqaction master_action = { .handler = piix4_master_intr, .name = "PIIX4-8259", + .flags = IRQF_NODELAY, }; static struct irqaction cascade_action = { .handler = no_action, .name = "cascade", + .flags = IRQF_NODELAY, }; diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 9c4e625..cdaeef2 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -137,6 +137,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) local_irq_enable(); if (!current->thread.vm86_info) { + local_irq_disable(); printk("no vm86_info: BAD\n"); do_exit(SIGSEGV); } diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 367e878..59f31d2 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -112,11 +112,6 @@ SECTIONS _sdata = .; DATA_DATA CONSTRUCTORS - -#ifdef CONFIG_X86_64 - /* End of data section */ - _edata = .; -#endif } :data #ifdef CONFIG_X86_32 @@ -156,10 +151,8 @@ SECTIONS .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) -#ifdef CONFIG_X86_32 /* End of data section */ _edata = .; -#endif } #ifdef CONFIG_X86_64 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 25ee06a..a6c5525 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -59,7 +59,7 @@ int __vgetcpu_mode __section_vgetcpu_mode; struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data = { - .lock = SEQLOCK_UNLOCKED, + .lock = __ATOMIC_SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), .sysctl_enabled = 1, }; @@ -67,27 +67,54 @@ void update_vsyscall_tz(void) { unsigned long flags; - write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); + write_atomic_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); /* sys_tz has changed */ vsyscall_gtod_data.sys_tz = sys_tz; - write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); + write_atomic_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); } void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) { unsigned long flags; - write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); + write_atomic_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); + + if (likely(vsyscall_gtod_data.sysctl_enabled == 2)) { + struct timespec tmp = *(wall_time); + cycle_t (*vread)(void); + cycle_t now; + + vread = vsyscall_gtod_data.clock.vread; + if (likely(vread)) + now = vread(); + else + now = clock->read(clock); + + /* calculate interval: */ + now = (now - clock->cycle_last) & clock->mask; + /* convert to nsecs: */ + tmp.tv_nsec += ( now * clock->mult) >> clock->shift; + + while (tmp.tv_nsec >= NSEC_PER_SEC) { + tmp.tv_sec += 1; + tmp.tv_nsec -= NSEC_PER_SEC; + } + + vsyscall_gtod_data.wall_time_sec = tmp.tv_sec; + vsyscall_gtod_data.wall_time_nsec = tmp.tv_nsec; + } else { + vsyscall_gtod_data.wall_time_sec = 
wall_time->tv_sec; + vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; + } + /* copy vsyscall data */ vsyscall_gtod_data.clock.vread = clock->vread; vsyscall_gtod_data.clock.cycle_last = clock->cycle_last; vsyscall_gtod_data.clock.mask = clock->mask; vsyscall_gtod_data.clock.mult = clock->mult; vsyscall_gtod_data.clock.shift = clock->shift; - vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; - vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; - write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); + write_atomic_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); } /* RED-PEN may want to readd seq locking, but then the variable should be @@ -123,8 +150,28 @@ static __always_inline void do_vgettimeofday(struct timeval * tv) unsigned seq; unsigned long mult, shift, nsec; cycle_t (*vread)(void); + + if (likely(__vsyscall_gtod_data.sysctl_enabled == 2)) { + struct timeval tmp; + + do { + barrier(); + tv->tv_sec = __vsyscall_gtod_data.wall_time_sec; + tv->tv_usec = __vsyscall_gtod_data.wall_time_nsec; + barrier(); + tmp.tv_sec = __vsyscall_gtod_data.wall_time_sec; + tmp.tv_usec = __vsyscall_gtod_data.wall_time_nsec; + + } while (tmp.tv_usec != tv->tv_usec || + tmp.tv_sec != tv->tv_sec); + + tv->tv_usec /= NSEC_PER_MSEC; + tv->tv_usec *= USEC_PER_MSEC; + return; + } + do { - seq = read_seqbegin(&__vsyscall_gtod_data.lock); + seq = read_atomic_seqbegin(&__vsyscall_gtod_data.lock); vread = __vsyscall_gtod_data.clock.vread; if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) { @@ -133,6 +180,7 @@ static __always_inline void do_vgettimeofday(struct timeval * tv) } now = vread(); + base = __vsyscall_gtod_data.clock.cycle_last; mask = __vsyscall_gtod_data.clock.mask; mult = __vsyscall_gtod_data.clock.mult; @@ -140,7 +188,9 @@ static __always_inline void do_vgettimeofday(struct timeval * tv) tv->tv_sec = __vsyscall_gtod_data.wall_time_sec; nsec = __vsyscall_gtod_data.wall_time_nsec; - } while (read_seqretry(&__vsyscall_gtod_data.lock, seq)); + } while (read_atomic_seqretry(&__vsyscall_gtod_data.lock, seq)); + + now = vread(); /* calculate interval: */ cycle_delta = (now - base) & mask; diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 4d6f0d2..bee99bd 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -237,11 +237,11 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian) { struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state, irq_ack_notifier); - spin_lock(&ps->inject_lock); + atomic_spin_lock(&ps->inject_lock); if (atomic_dec_return(&ps->pit_timer.pending) < 0) atomic_inc(&ps->pit_timer.pending); ps->irq_ack = 1; - spin_unlock(&ps->inject_lock); + atomic_spin_unlock(&ps->inject_lock); } void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) @@ -577,7 +577,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) mutex_init(&pit->pit_state.lock); mutex_lock(&pit->pit_state.lock); - spin_lock_init(&pit->pit_state.inject_lock); + atomic_spin_lock_init(&pit->pit_state.inject_lock); /* Initialize PIO device */ pit->dev.read = pit_ioport_read; @@ -669,12 +669,12 @@ void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu) /* Try to inject pending interrupts when * last one has been acked. 
*/ - spin_lock(&ps->inject_lock); + atomic_spin_lock(&ps->inject_lock); if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) { ps->irq_ack = 0; inject = 1; } - spin_unlock(&ps->inject_lock); + atomic_spin_unlock(&ps->inject_lock); if (inject) __inject_pit_timer_intr(kvm); } diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index bbd863f..33e30e3 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -26,7 +26,7 @@ struct kvm_kpit_state { u32 speaker_data_on; struct mutex lock; struct kvm_pit *pit; - spinlock_t inject_lock; + atomic_spinlock_t inject_lock; unsigned long irq_ack; struct kvm_irq_ack_notifier irq_ack_notifier; }; diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 1ccb50c..91fcca1 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -34,7 +34,7 @@ static void pic_lock(struct kvm_pic *s) __acquires(&s->lock) { - spin_lock(&s->lock); + atomic_spin_lock(&s->lock); } static void pic_unlock(struct kvm_pic *s) @@ -48,7 +48,7 @@ static void pic_unlock(struct kvm_pic *s) s->pending_acks = 0; s->wakeup_needed = false; - spin_unlock(&s->lock); + atomic_spin_unlock(&s->lock); while (acks) { kvm_notify_acked_irq(kvm, SELECT_PIC(__ffs(acks)), @@ -522,7 +522,7 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm) s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL); if (!s) return NULL; - spin_lock_init(&s->lock); + atomic_spin_lock_init(&s->lock); s->kvm = kvm; s->pics[0].elcr_mask = 0xf8; s->pics[1].elcr_mask = 0xde; diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index 9f59318..fdae2ef 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -62,7 +62,7 @@ struct kvm_kpic_state { }; struct kvm_pic { - spinlock_t lock; + atomic_spinlock_t lock; bool wakeup_needed; unsigned pending_acks; struct kvm *kvm; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index bfae139..07df264 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -561,6 +561,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) nr = (address - idt_descr.address) >> 3; if (nr == 6) { + zap_rt_locks(); do_invalid_op(regs, 0); return 1; } @@ -1032,7 +1033,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: */ - if (unlikely(in_atomic() || !mm)) { + if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { bad_area_nosemaphore(regs, error_code, address); return; } diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 71da1bc..71871c8 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -77,13 +77,13 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, if (write) mask |= _PAGE_RW; - ptep = pte_offset_map(&pmd, addr); + ptep = pte_offset_map_direct(&pmd, addr); do { pte_t pte = gup_get_pte(ptep); struct page *page; if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { - pte_unmap(ptep); + pte_unmap_direct(ptep); return 0; } VM_BUG_ON(!pfn_valid(pte_pfn(pte))); @@ -93,7 +93,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, (*nr)++; } while (ptep++, addr += PAGE_SIZE, addr != end); - pte_unmap(ptep - 1); + pte_unmap_direct(ptep - 1); return 1; } diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 58f621e..f4ac96d 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -4,9 +4,9 @@ void *kmap(struct page *page) { - might_sleep(); if (!PageHighMem(page)) return page_address(page); + might_sleep(); return kmap_high(page); } @@ -19,6 +19,27 @@ 
void kunmap(struct page *page) kunmap_high(page); } +void kunmap_virt(void *ptr) +{ + struct page *page; + + if ((unsigned long)ptr < PKMAP_ADDR(0)) + return; + page = pte_page(pkmap_page_table[PKMAP_NR((unsigned long)ptr)]); + kunmap(page); +} + +struct page *kmap_to_page(void *ptr) +{ + struct page *page; + + if ((unsigned long)ptr < PKMAP_ADDR(0)) + return virt_to_page(ptr); + page = pte_page(pkmap_page_table[PKMAP_NR((unsigned long)ptr)]); + return page; +} +EXPORT_SYMBOL_GPL(kmap_to_page); /* PREEMPT_RT converts some modules to use this */ + /* * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because * no global lock is needed and because the kmap code must perform a global TLB @@ -27,12 +48,13 @@ void kunmap(struct page *page) * However when holding an atomic kmap is is not legal to sleep, so atomic * kmaps are appropriate for short, tight code paths only. */ -void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) +void *__kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) { enum fixed_addresses idx; unsigned long vaddr; /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ + preempt_disable(); pagefault_disable(); if (!PageHighMem(page)) @@ -42,18 +64,23 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - BUG_ON(!pte_none(*(kmap_pte-idx))); + WARN_ON(!pte_none(*(kmap_pte-idx))); set_pte(kmap_pte-idx, mk_pte(page, prot)); return (void *)vaddr; } -void *kmap_atomic(struct page *page, enum km_type type) +void *__kmap_atomic_direct(struct page *page, enum km_type type) +{ + return __kmap_atomic_prot(page, type, kmap_prot); +} + +void *__kmap_atomic(struct page *page, enum km_type type) { return kmap_atomic_prot(page, type, kmap_prot); } -void kunmap_atomic(void *kvaddr, enum km_type type) +void __kunmap_atomic(void *kvaddr, enum km_type type) { unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); @@ -74,19 +101,21 @@ void kunmap_atomic(void *kvaddr, enum km_type type) } pagefault_enable(); + preempt_enable(); } /* * This is the same as kmap_atomic() but can map memory that doesn't * have a struct page associated with it. 
*/ -void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) +void *__kmap_atomic_pfn(unsigned long pfn, enum km_type type) { + preempt_disable(); return kmap_atomic_prot_pfn(pfn, type, kmap_prot); } -EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */ +EXPORT_SYMBOL_GPL(__kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */ -struct page *kmap_atomic_to_page(void *ptr) +struct page *__kmap_atomic_to_page(void *ptr) { unsigned long idx, vaddr = (unsigned long)ptr; pte_t *pte; @@ -101,8 +130,9 @@ struct page *kmap_atomic_to_page(void *ptr) EXPORT_SYMBOL(kmap); EXPORT_SYMBOL(kunmap); -EXPORT_SYMBOL(kmap_atomic); -EXPORT_SYMBOL(kunmap_atomic); +EXPORT_SYMBOL(kunmap_virt); +EXPORT_SYMBOL(__kmap_atomic); +EXPORT_SYMBOL(__kunmap_atomic); void __init set_highmem_pages_init(void) { diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 0607119..eab22a7 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -14,8 +14,6 @@ #include #include -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); - unsigned long __initdata e820_table_start; unsigned long __meminitdata e820_table_end; unsigned long __meminitdata e820_table_top; diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index fe6f84c..bd045c2 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -37,6 +37,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) enum fixed_addresses idx; unsigned long vaddr; + preempt_disable(); pagefault_disable(); debug_kmap_atomic(type); @@ -83,5 +84,6 @@ iounmap_atomic(void *kvaddr, enum km_type type) kpte_clear_flush(kmap_pte-idx, vaddr); pagefault_enable(); + preempt_enable(); } EXPORT_SYMBOL_GPL(iounmap_atomic); diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 1b734d7..22a2c36 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -849,8 +849,10 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, } } +#if 0 /* Must avoid aliasing mappings in the highmem code */ kmap_flush_unused(); +#endif vm_unmap_aliases(); diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 8e43bdd..5e3bd3e 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -25,7 +25,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) return pte; } -void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte) +void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) { pgtable_page_dtor(pte); paravirt_release_pte(page_to_pfn(pte)); @@ -33,14 +33,14 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte) } #if PAGETABLE_LEVELS > 2 -void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) +void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) { paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); tlb_remove_page(tlb, virt_to_page(pmd)); } #if PAGETABLE_LEVELS > 3 -void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) +void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) { paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); tlb_remove_page(tlb, virt_to_page(pud)); @@ -132,6 +132,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) reserved at the pmd (PDPT) level. 
*/ set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT)); + preempt_disable(); /* * According to Intel App note "TLBs, Paging-Structure Caches, * and Their Invalidation", April 2007, document 317080-001, @@ -140,6 +141,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) */ if (mm == current->active_mm) write_cr3(read_cr3()); + preempt_enable(); } #else /* !CONFIG_X86_PAE */ diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 2dfcbf9..dbb5381 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -79,8 +79,10 @@ static __init void bad_srat(void) acpi_numa = -1; for (i = 0; i < MAX_LOCAL_APIC; i++) apicid_to_node[i] = NUMA_NO_NODE; - for (i = 0; i < MAX_NUMNODES; i++) - nodes_add[i].start = nodes[i].end = 0; + for (i = 0; i < MAX_NUMNODES; i++) { + nodes[i].start = nodes[i].end = 0; + nodes_add[i].start = nodes_add[i].end = 0; + } remove_all_active_ranges(); } diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 821e970..55ceed8 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -40,7 +40,7 @@ union smp_flush_state { struct { struct mm_struct *flush_mm; unsigned long flush_va; - spinlock_t tlbstate_lock; + atomic_spinlock_t tlbstate_lock; DECLARE_BITMAP(flush_cpumask, NR_CPUS); }; char pad[CONFIG_X86_INTERNODE_CACHE_BYTES]; @@ -179,7 +179,7 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is * probably not worth checking this for a cache-hot lock. */ - spin_lock(&f->tlbstate_lock); + atomic_spin_lock(&f->tlbstate_lock); f->flush_mm = mm; f->flush_va = va; @@ -198,7 +198,7 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, f->flush_mm = NULL; f->flush_va = 0; - spin_unlock(&f->tlbstate_lock); + atomic_spin_unlock(&f->tlbstate_lock); } void native_flush_tlb_others(const struct cpumask *cpumask, @@ -222,7 +222,7 @@ static int __cpuinit init_smp_flush(void) int i; for (i = 0; i < ARRAY_SIZE(flush_state); i++) - spin_lock_init(&flush_state[i].tlbstate_lock); + atomic_spin_lock_init(&flush_state[i].tlbstate_lock); return 0; } diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 89b9a5c..dea0100 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -125,9 +125,9 @@ static void nmi_cpu_setup(void *dummy) { int cpu = smp_processor_id(); struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); - spin_lock(&oprofilefs_lock); + atomic_spin_lock(&oprofilefs_lock); model->setup_ctrs(msrs); - spin_unlock(&oprofilefs_lock); + atomic_spin_unlock(&oprofilefs_lock); per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); apic_write(APIC_LVTPC, APIC_DM_NMI); } diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 2202b62..fc2697c 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -81,7 +81,7 @@ int pcibios_scanned; * This interrupt-safe spinlock protects all accesses to PCI * configuration space. 
*/ -DEFINE_SPINLOCK(pci_config_lock); +DEFINE_ATOMIC_SPINLOCK(pci_config_lock); static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d) { diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c index bd13c3e..e76cff3 100644 --- a/arch/x86/pci/direct.c +++ b/arch/x86/pci/direct.c @@ -27,7 +27,7 @@ static int pci_conf1_read(unsigned int seg, unsigned int bus, return -EINVAL; } - spin_lock_irqsave(&pci_config_lock, flags); + atomic_spin_lock_irqsave(&pci_config_lock, flags); outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8); @@ -43,7 +43,7 @@ static int pci_conf1_read(unsigned int seg, unsigned int bus, break; } - spin_unlock_irqrestore(&pci_config_lock, flags); + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } @@ -56,7 +56,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus, if ((bus > 255) || (devfn > 255) || (reg > 4095)) return -EINVAL; - spin_lock_irqsave(&pci_config_lock, flags); + atomic_spin_lock_irqsave(&pci_config_lock, flags); outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8); @@ -72,7 +72,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus, break; } - spin_unlock_irqrestore(&pci_config_lock, flags); + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } @@ -108,7 +108,7 @@ static int pci_conf2_read(unsigned int seg, unsigned int bus, if (dev & 0x10) return PCIBIOS_DEVICE_NOT_FOUND; - spin_lock_irqsave(&pci_config_lock, flags); + atomic_spin_lock_irqsave(&pci_config_lock, flags); outb((u8)(0xF0 | (fn << 1)), 0xCF8); outb((u8)bus, 0xCFA); @@ -127,7 +127,7 @@ static int pci_conf2_read(unsigned int seg, unsigned int bus, outb(0, 0xCF8); - spin_unlock_irqrestore(&pci_config_lock, flags); + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } @@ -147,7 +147,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus, if (dev & 0x10) return PCIBIOS_DEVICE_NOT_FOUND; - spin_lock_irqsave(&pci_config_lock, flags); + atomic_spin_lock_irqsave(&pci_config_lock, flags); outb((u8)(0xF0 | (fn << 1)), 0xCF8); outb((u8)bus, 0xCFA); @@ -166,7 +166,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus, outb(0, 0xCF8); - spin_unlock_irqrestore(&pci_config_lock, flags); + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } @@ -223,16 +223,23 @@ static int __init pci_check_type1(void) unsigned int tmp; int works = 0; - local_irq_save(flags); + atomic_spin_lock_irqsave(&pci_config_lock, flags); outb(0x01, 0xCFB); tmp = inl(0xCF8); outl(0x80000000, 0xCF8); - if (inl(0xCF8) == 0x80000000 && pci_sanity_check(&pci_direct_conf1)) { - works = 1; + + if (inl(0xCF8) == 0x80000000) { + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); + + if (pci_sanity_check(&pci_direct_conf1)) + works = 1; + + atomic_spin_lock_irqsave(&pci_config_lock, flags); } outl(tmp, 0xCF8); - local_irq_restore(flags); + + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); return works; } @@ -242,17 +249,19 @@ static int __init pci_check_type2(void) unsigned long flags; int works = 0; - local_irq_save(flags); + atomic_spin_lock_irqsave(&pci_config_lock, flags); outb(0x00, 0xCFB); outb(0x00, 0xCF8); outb(0x00, 0xCFA); - if (inb(0xCF8) == 0x00 && inb(0xCFA) == 0x00 && - pci_sanity_check(&pci_direct_conf2)) { - works = 1; - } - local_irq_restore(flags); + if (inb(0xCF8) == 0x00 && inb(0xCFA) == 0x00) { + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); + + if (pci_sanity_check(&pci_direct_conf2)) + works = 1; + } else + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); return works; } 
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c index 8b2d561..7ca333f 100644 --- a/arch/x86/pci/mmconfig_32.c +++ b/arch/x86/pci/mmconfig_32.c @@ -72,7 +72,7 @@ err: *value = -1; if (!base) goto err; - spin_lock_irqsave(&pci_config_lock, flags); + atomic_spin_lock_irqsave(&pci_config_lock, flags); pci_exp_set_dev_base(base, bus, devfn); @@ -87,7 +87,7 @@ err: *value = -1; *value = mmio_config_readl(mmcfg_virt_addr + reg); break; } - spin_unlock_irqrestore(&pci_config_lock, flags); + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } @@ -105,7 +105,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus, if (!base) return -EINVAL; - spin_lock_irqsave(&pci_config_lock, flags); + atomic_spin_lock_irqsave(&pci_config_lock, flags); pci_exp_set_dev_base(base, bus, devfn); @@ -120,7 +120,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus, mmio_config_writel(mmcfg_virt_addr + reg, value); break; } - spin_unlock_irqrestore(&pci_config_lock, flags); + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c index 8eb295e..f2a1f1f 100644 --- a/arch/x86/pci/numaq_32.c +++ b/arch/x86/pci/numaq_32.c @@ -41,7 +41,7 @@ static int pci_conf1_mq_read(unsigned int seg, unsigned int bus, if (!value || (bus >= MAX_MP_BUSSES) || (devfn > 255) || (reg > 255)) return -EINVAL; - spin_lock_irqsave(&pci_config_lock, flags); + atomic_spin_lock_irqsave(&pci_config_lock, flags); write_cf8(bus, devfn, reg); @@ -66,7 +66,7 @@ static int pci_conf1_mq_read(unsigned int seg, unsigned int bus, break; } - spin_unlock_irqrestore(&pci_config_lock, flags); + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } @@ -80,7 +80,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus, if ((bus >= MAX_MP_BUSSES) || (devfn > 255) || (reg > 255)) return -EINVAL; - spin_lock_irqsave(&pci_config_lock, flags); + atomic_spin_lock_irqsave(&pci_config_lock, flags); write_cf8(bus, devfn, reg); @@ -105,7 +105,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus, break; } - spin_unlock_irqrestore(&pci_config_lock, flags); + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c index 1c975cc..ffebd80 100644 --- a/arch/x86/pci/pcbios.c +++ b/arch/x86/pci/pcbios.c @@ -161,7 +161,7 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, if (!value || (bus > 255) || (devfn > 255) || (reg > 255)) return -EINVAL; - spin_lock_irqsave(&pci_config_lock, flags); + atomic_spin_lock_irqsave(&pci_config_lock, flags); switch (len) { case 1: @@ -212,7 +212,7 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, break; } - spin_unlock_irqrestore(&pci_config_lock, flags); + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); return (int)((result & 0xff00) >> 8); } @@ -227,7 +227,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, if ((bus > 255) || (devfn > 255) || (reg > 255)) return -EINVAL; - spin_lock_irqsave(&pci_config_lock, flags); + atomic_spin_lock_irqsave(&pci_config_lock, flags); switch (len) { case 1: @@ -268,7 +268,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, break; } - spin_unlock_irqrestore(&pci_config_lock, flags); + atomic_spin_unlock_irqrestore(&pci_config_lock, flags); return (int)((result & 0xff00) >> 8); } diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 6a40b78..cd36532 100644 --- 
a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -47,11 +47,11 @@ notrace static noinline int do_realtime(struct timespec *ts) { unsigned long seq, ns; do { - seq = read_seqbegin(&gtod->lock); + seq = read_atomic_seqbegin(&gtod->lock); ts->tv_sec = gtod->wall_time_sec; ts->tv_nsec = gtod->wall_time_nsec; ns = vgetns(); - } while (unlikely(read_seqretry(&gtod->lock, seq))); + } while (unlikely(read_atomic_seqretry(&gtod->lock, seq))); timespec_add_ns(ts, ns); return 0; } @@ -76,12 +76,12 @@ notrace static noinline int do_monotonic(struct timespec *ts) { unsigned long seq, ns, secs; do { - seq = read_seqbegin(&gtod->lock); + seq = read_atomic_seqbegin(&gtod->lock); secs = gtod->wall_time_sec; ns = gtod->wall_time_nsec + vgetns(); secs += gtod->wall_to_monotonic.tv_sec; ns += gtod->wall_to_monotonic.tv_nsec; - } while (unlikely(read_seqretry(&gtod->lock, seq))); + } while (unlikely(read_atomic_seqretry(&gtod->lock, seq))); vset_normalized_timespec(ts, secs, ns); return 0; } diff --git a/arch/xtensa/include/asm/rwsem.h b/arch/xtensa/include/asm/rwsem.h index e39edf5..32c5e28 100644 --- a/arch/xtensa/include/asm/rwsem.h +++ b/arch/xtensa/include/asm/rwsem.h @@ -25,7 +25,7 @@ /* * the semaphore definition */ -struct rw_semaphore { +struct rw_anon_semaphore { signed long count; #define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_ACTIVE_BIAS 0x00000001 @@ -37,29 +37,37 @@ struct rw_semaphore { struct list_head wait_list; }; -#define __RWSEM_INITIALIZER(name) \ +#define __RWSEM_ANON_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ LIST_HEAD_INIT((name).wait_list) } -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) +#define DECLARE_ANON_RWSEM(name) \ + struct rw_anon_semaphore name = __RWSEM_ANON_INITIALIZER(name) -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_down_read_failed(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_down_write_failed(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *sem); +extern struct rw_anon_semaphore * +rwsem_downgrade_wake(struct rw_anon_semaphore *sem); -static inline void init_rwsem(struct rw_semaphore *sem) +static inline void init_anon_rwsem(struct rw_anon_semaphore *sem) { sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); } +static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem) +{ + return (sem->count != 0); +} + /* * lock for reading */ -static inline void __down_read(struct rw_semaphore *sem) +static inline void __down_read(struct rw_anon_semaphore *sem) { if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0) smp_wmb(); @@ -67,7 +75,7 @@ static inline void __down_read(struct rw_semaphore *sem) rwsem_down_read_failed(sem); } -static inline int __down_read_trylock(struct rw_semaphore *sem) +static inline int __down_read_trylock(struct rw_anon_semaphore *sem) { int tmp; @@ -84,7 +92,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) /* * lock for writing */ -static inline void __down_write(struct rw_semaphore *sem) +static inline void __down_write(struct rw_anon_semaphore *sem) { int tmp; @@ -96,7 +104,7 @@ static inline void __down_write(struct
rw_semaphore *sem) rwsem_down_write_failed(sem); } -static inline int __down_write_trylock(struct rw_semaphore *sem) +static inline int __down_write_trylock(struct rw_anon_semaphore *sem) { int tmp; @@ -109,7 +117,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) /* * unlock after reading */ -static inline void __up_read(struct rw_semaphore *sem) +static inline void __up_read(struct rw_anon_semaphore *sem) { int tmp; @@ -122,7 +130,7 @@ static inline void __up_read(struct rw_semaphore *sem) /* * unlock after writing */ -static inline void __up_write(struct rw_semaphore *sem) +static inline void __up_write(struct rw_anon_semaphore *sem) { smp_wmb(); if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, @@ -133,7 +141,7 @@ static inline void __up_write(struct rw_semaphore *sem) /* * implement atomic add functionality */ -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +static inline void rwsem_atomic_add(int delta, struct rw_anon_semaphore *sem) { atomic_add(delta, (atomic_t *)(&sem->count)); } @@ -141,7 +149,7 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) /* * downgrade write lock to read lock */ -static inline void __downgrade_write(struct rw_semaphore *sem) +static inline void __downgrade_write(struct rw_anon_semaphore *sem) { int tmp; @@ -154,12 +162,37 @@ static inline void __downgrade_write(struct rw_semaphore *sem) /* * implement exchange and add functionality */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +static inline int rwsem_atomic_update(int delta, struct rw_anon_semaphore *sem) { smp_mb(); return atomic_add_return(delta, (atomic_t *)(&sem->count)); } +static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem) +{ + return (sem->count != 0); +} + +struct rw_semaphore { + signed long count; + spinlock_t wait_lock; + struct list_head wait_list; +}; + +#define __RWSEM_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ + LIST_HEAD_INIT((name).wait_list) } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +static inline void init_rwsem(struct rw_semaphore *sem) +{ + sem->count = RWSEM_UNLOCKED_VALUE; + spin_lock_init(&sem->wait_lock); + INIT_LIST_HEAD(&sem->wait_list); +} + static inline int rwsem_is_locked(struct rw_semaphore *sem) { return (sem->count != 0); diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h index 31c220f..0d766f9 100644 --- a/arch/xtensa/include/asm/tlb.h +++ b/arch/xtensa/include/asm/tlb.h @@ -42,6 +42,6 @@ #include -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) +#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) #endif /* _XTENSA_TLB_H */ diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index a1badb3..bb599c3 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) diff --git a/block/blk-core.c b/block/blk-core.c index 4b45435..712b29b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -201,7 +201,7 @@ 
EXPORT_SYMBOL(blk_dump_rq_flags); */ void blk_plug_device(struct request_queue *q) { - WARN_ON(!irqs_disabled()); + WARN_ON_NONRT(!irqs_disabled()); /* * don't plug a stopped queue, it must be paired with blk_start_queue() @@ -241,7 +241,7 @@ EXPORT_SYMBOL(blk_plug_device_unlocked); */ int blk_remove_plug(struct request_queue *q) { - WARN_ON(!irqs_disabled()); + WARN_ON_NONRT(!irqs_disabled()); if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q)) return 0; @@ -333,7 +333,7 @@ EXPORT_SYMBOL(blk_unplug); **/ void blk_start_queue(struct request_queue *q) { - WARN_ON(!irqs_disabled()); + WARN_ON_NONRT(!irqs_disabled()); queue_flag_clear(QUEUE_FLAG_STOPPED, q); __blk_run_queue(q); diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 3d87362..feee017 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -197,7 +197,12 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_present; * interrupt level */ ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */ -ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ + +/* + * Need to be raw because it might be used in acpi_processor_idle(): + */ +ACPI_EXTERN atomic_spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ + #define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock #define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 23d5505..756fe9e 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -85,7 +85,7 @@ acpi_status acpi_hw_clear_acpi_status(void) ACPI_BITMASK_ALL_FIXED_STATUS, ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address))); - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + atomic_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); /* Clear the fixed events in PM1 A/B */ @@ -100,7 +100,7 @@ acpi_status acpi_hw_clear_acpi_status(void) status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL); unlock_and_exit: - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + atomic_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 9829979..3898b18 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -341,7 +341,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value) return_ACPI_STATUS(AE_BAD_PARAMETER); } - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + atomic_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); /* * At this point, we know that the parent register is one of the @@ -402,7 +402,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value) unlock_and_exit: - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + atomic_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c index 80bb651..477de01 100644 --- a/drivers/acpi/acpica/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c @@ -84,7 +84,7 @@ acpi_status acpi_ut_mutex_initialize(void) /* Create the spinlocks for use at interrupt level */ spin_lock_init(acpi_gbl_gpe_lock); - spin_lock_init(acpi_gbl_hardware_lock); + atomic_spin_lock_init(acpi_gbl_hardware_lock); /* Create the reader/writer lock for namespace access */ @@ -117,11 +117,6 @@ void acpi_ut_mutex_terminate(void) (void)acpi_ut_delete_mutex(i); } - /* Delete the spinlocks */ - - 
acpi_os_delete_lock(acpi_gbl_gpe_lock); - acpi_os_delete_lock(acpi_gbl_hardware_lock); - /* Delete the reader/writer lock */ acpi_ut_delete_rw_lock(&acpi_gbl_namespace_rw_lock); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 391f331..fe086ca 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -573,8 +573,22 @@ static u32 acpi_ec_gpe_handler(void *data) if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) { gpe_transaction(ec, status); if (ec_transaction_done(ec) && - (status & ACPI_EC_FLAG_IBF) == 0) + (status & ACPI_EC_FLAG_IBF) == 0) { + +#ifndef CONFIG_PREEMPT_RT wake_up(&ec->wait); +#else + // hack ... + if (waitqueue_active(&ec->wait)) { + struct task_struct *task; + + task = list_entry(ec->wait.task_list.next, + wait_queue_t, task_list)->private; + if (task) + wake_up_process(task); + } +#endif + } } ec_check_sci(ec, status); diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 7167071..1ad7dd9 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -808,14 +808,14 @@ void acpi_os_delete_lock(acpi_spinlock handle) acpi_status acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) { - struct semaphore *sem = NULL; + struct anon_semaphore *sem = NULL; - sem = acpi_os_allocate(sizeof(struct semaphore)); + sem = acpi_os_allocate(sizeof(struct anon_semaphore)); if (!sem) return AE_NO_MEMORY; - memset(sem, 0, sizeof(struct semaphore)); + memset(sem, 0, sizeof(struct anon_semaphore)); - sema_init(sem, initial_units); + anon_sema_init(sem, initial_units); *handle = (acpi_handle *) sem; @@ -834,7 +834,7 @@ acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) acpi_status acpi_os_delete_semaphore(acpi_handle handle) { - struct semaphore *sem = (struct semaphore *)handle; + struct anon_semaphore *sem = (struct anon_semaphore *)handle; if (!sem) return AE_BAD_PARAMETER; @@ -854,7 +854,7 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle) acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) { acpi_status status = AE_OK; - struct semaphore *sem = (struct semaphore *)handle; + struct anon_semaphore *sem = (struct anon_semaphore *)handle; long jiffies; int ret = 0; @@ -872,7 +872,7 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) else jiffies = msecs_to_jiffies(timeout); - ret = down_timeout(sem, jiffies); + ret = anon_down_timeout(sem, jiffies); if (ret) status = AE_TIME; @@ -895,7 +895,7 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) */ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) { - struct semaphore *sem = (struct semaphore *)handle; + struct anon_semaphore *sem = (struct anon_semaphore *)handle; if (!sem || (units < 1)) return AE_BAD_PARAMETER; @@ -906,7 +906,7 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle, units)); - up(sem); + anon_up(sem); return AE_OK; } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 0efa59e..0f90a64 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -919,7 +919,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, } static int c3_cpu_count; -static DEFINE_SPINLOCK(c3_lock); +static DEFINE_ATOMIC_SPINLOCK(c3_lock); /** * acpi_idle_enter_bm - enters C3 with proper BM handling @@ -994,12 +994,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, * without doing anything. 
*/ if (pr->flags.bm_check && pr->flags.bm_control) { - spin_lock(&c3_lock); + atomic_spin_lock(&c3_lock); c3_cpu_count++; /* Disable bus master arbitration when all CPUs are in C3 */ if (c3_cpu_count == num_online_cpus()) acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); - spin_unlock(&c3_lock); + atomic_spin_unlock(&c3_lock); } else if (!pr->flags.bm_check) { ACPI_FLUSH_CPU_CACHE(); } @@ -1008,10 +1008,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, /* Re-enable bus master arbitration */ if (pr->flags.bm_check && pr->flags.bm_control) { - spin_lock(&c3_lock); + atomic_spin_lock(&c3_lock); acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); c3_cpu_count--; - spin_unlock(&c3_lock); + atomic_spin_unlock(&c3_lock); } kt2 = ktime_get_real(); idle_time = ktime_to_us(ktime_sub(kt2, kt1)); diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index bbbb1fa..b79d110 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -837,9 +837,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, unsigned long flags; unsigned int consumed; - local_irq_save(flags); + local_irq_save_nort(flags); consumed = ata_sff_data_xfer(dev, buf, buflen, rw); - local_irq_restore(flags); + local_irq_restore_nort(flags); return consumed; } @@ -878,7 +878,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) unsigned long flags; /* FIXME: use a bounce buffer */ - local_irq_save(flags); + local_irq_save_nort(flags); buf = kmap_atomic(page, KM_IRQ0); /* do the actual data transfer */ @@ -886,7 +886,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) do_write); kunmap_atomic(buf, KM_IRQ0); - local_irq_restore(flags); + local_irq_restore_nort(flags); } else { buf = page_address(page); ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, @@ -1016,7 +1016,7 @@ next_sg: unsigned long flags; /* FIXME: use bounce buffer */ - local_irq_save(flags); + local_irq_save_nort(flags); buf = kmap_atomic(page, KM_IRQ0); /* do the actual data transfer */ @@ -1024,7 +1024,7 @@ next_sg: count, rw); kunmap_atomic(buf, KM_IRQ0); - local_irq_restore(flags); + local_irq_restore_nort(flags); } else { buf = page_address(page); consumed = ap->ops->sff_data_xfer(dev, buf + offset, diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 4b04a15..77ecb26 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -173,10 +173,10 @@ static ssize_t driver_unbind(struct device_driver *drv, dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == drv) { if (dev->parent) /* Needed for USB */ - down(&dev->parent->sem); + mutex_lock(&dev->parent->mutex); device_release_driver(dev); if (dev->parent) - up(&dev->parent->sem); + mutex_unlock(&dev->parent->mutex); err = count; } put_device(dev); @@ -200,12 +200,12 @@ static ssize_t driver_bind(struct device_driver *drv, dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { if (dev->parent) /* Needed for USB */ - down(&dev->parent->sem); - down(&dev->sem); + mutex_lock(&dev->parent->mutex); + mutex_lock(&dev->mutex); err = driver_probe_device(drv, dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); if (dev->parent) - up(&dev->parent->sem); + mutex_unlock(&dev->parent->mutex); if (err > 0) { /* success */ @@ -742,10 +742,10 @@ static int __must_check bus_rescan_devices_helper(struct device *dev, if (!dev->driver) { if (dev->parent) /* Needed for USB */ - down(&dev->parent->sem); + mutex_lock(&dev->parent->mutex); ret = device_attach(dev); if 
(dev->parent) - up(&dev->parent->sem); + mutex_unlock(&dev->parent->mutex); } return ret < 0 ? ret : 0; } @@ -777,10 +777,10 @@ int device_reprobe(struct device *dev) { if (dev->driver) { if (dev->parent) /* Needed for USB */ - down(&dev->parent->sem); + mutex_lock(&dev->parent->mutex); device_release_driver(dev); if (dev->parent) - up(&dev->parent->sem); + mutex_unlock(&dev->parent->mutex); } return bus_rescan_devices_helper(dev, NULL); } diff --git a/drivers/base/core.c b/drivers/base/core.c index 7ecb193..9cfc4a5 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include @@ -550,7 +549,7 @@ void device_initialize(struct device *dev) dev->kobj.kset = devices_kset; kobject_init(&dev->kobj, &device_ktype); INIT_LIST_HEAD(&dev->dma_pools); - init_MUTEX(&dev->sem); + mutex_init(&dev->mutex); spin_lock_init(&dev->devres_lock); INIT_LIST_HEAD(&dev->devres_head); device_init_wakeup(dev, 0); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index f010687..c90f82b 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -84,7 +84,7 @@ static void driver_sysfs_remove(struct device *dev) * for before calling this. (It is ok to call with no other effort * from a driver's probe() method.) * - * This function must be called with @dev->sem held. + * This function must be called with @dev->mutex held. */ int device_bind_driver(struct device *dev) { @@ -189,8 +189,8 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe); * This function returns -ENODEV if the device is not registered, * 1 if the device is bound sucessfully and 0 otherwise. * - * This function must be called with @dev->sem held. When called for a - * USB interface, @dev->parent->sem must be held as well. + * This function must be called with @dev->mutex held. When called for a + * USB interface, @dev->parent->mutex must be held as well. */ int driver_probe_device(struct device_driver *drv, struct device *dev) { @@ -229,13 +229,13 @@ static int __device_attach(struct device_driver *drv, void *data) * 0 if no matching driver was found; * -ENODEV if the device is not registered. * - * When called for a USB interface, @dev->parent->sem must be held. + * When called for a USB interface, @dev->parent->mutex must be held. */ int device_attach(struct device *dev) { int ret = 0; - down(&dev->sem); + mutex_lock(&dev->mutex); if (dev->driver) { ret = device_bind_driver(dev); if (ret == 0) @@ -247,7 +247,7 @@ int device_attach(struct device *dev) } else { ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); } - up(&dev->sem); + mutex_unlock(&dev->mutex); return ret; } EXPORT_SYMBOL_GPL(device_attach); @@ -270,13 +270,13 @@ static int __driver_attach(struct device *dev, void *data) return 0; if (dev->parent) /* Needed for USB */ - down(&dev->parent->sem); - down(&dev->sem); + mutex_lock(&dev->parent->mutex); + mutex_lock(&dev->mutex); if (!dev->driver) driver_probe_device(drv, dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); if (dev->parent) - up(&dev->parent->sem); + mutex_unlock(&dev->parent->mutex); return 0; } @@ -297,8 +297,8 @@ int driver_attach(struct device_driver *drv) EXPORT_SYMBOL_GPL(driver_attach); /* - * __device_release_driver() must be called with @dev->sem held. - * When called for a USB interface, @dev->parent->sem must be held as well. + * __device_release_driver() must be called with @dev->mutex held. + * When called for a USB interface, @dev->parent->mutex must be held as well. 
*/ static void __device_release_driver(struct device *dev) { @@ -332,7 +332,7 @@ static void __device_release_driver(struct device *dev) * @dev: device. * * Manually detach device from driver. - * When called for a USB interface, @dev->parent->sem must be held. + * When called for a USB interface, @dev->parent->mutex must be held. */ void device_release_driver(struct device *dev) { @@ -341,9 +341,9 @@ void device_release_driver(struct device *dev) * within their ->remove callback for the same device, they * will deadlock right here. */ - down(&dev->sem); + mutex_lock(&dev->mutex); __device_release_driver(dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); } EXPORT_SYMBOL_GPL(device_release_driver); @@ -370,13 +370,13 @@ void driver_detach(struct device_driver *drv) spin_unlock(&drv->p->klist_devices.k_lock); if (dev->parent) /* Needed for USB */ - down(&dev->parent->sem); - down(&dev->sem); + mutex_lock(&dev->parent->mutex); + mutex_lock(&dev->mutex); if (dev->driver == drv) __device_release_driver(dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); if (dev->parent) - up(&dev->parent->sem); + mutex_unlock(&dev->parent->mutex); put_device(dev); } } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 58a3e57..01d026d 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -33,8 +33,8 @@ * because children are guaranteed to be discovered after parents, and * are inserted at the back of the list on discovery. * - * Since device_pm_add() may be called with a device semaphore held, - * we must never try to acquire a device semaphore while holding + * Since device_pm_add() may be called with a device mutex held, + * we must never try to acquire a device mutex while holding * dpm_list_mutex. */ @@ -381,7 +381,7 @@ static int device_resume(struct device *dev, pm_message_t state) TRACE_DEVICE(dev); TRACE_RESUME(0); - down(&dev->sem); + mutex_lock(&dev->mutex); if (dev->bus) { if (dev->bus->pm) { @@ -414,7 +414,7 @@ static int device_resume(struct device *dev, pm_message_t state) } } End: - up(&dev->sem); + mutex_unlock(&dev->mutex); TRACE_RESUME(error); return error; @@ -468,7 +468,7 @@ static void dpm_resume(pm_message_t state) */ static void device_complete(struct device *dev, pm_message_t state) { - down(&dev->sem); + mutex_lock(&dev->mutex); if (dev->class && dev->class->pm && dev->class->pm->complete) { pm_dev_dbg(dev, state, "completing class "); @@ -485,7 +485,7 @@ static void device_complete(struct device *dev, pm_message_t state) dev->bus->pm->complete(dev); } - up(&dev->sem); + mutex_unlock(&dev->mutex); } /** @@ -619,7 +619,7 @@ static int device_suspend(struct device *dev, pm_message_t state) { int error = 0; - down(&dev->sem); + mutex_lock(&dev->mutex); if (dev->class) { if (dev->class->pm) { @@ -654,7 +654,7 @@ static int device_suspend(struct device *dev, pm_message_t state) } } End: - up(&dev->sem); + mutex_unlock(&dev->mutex); return error; } @@ -705,7 +705,7 @@ static int device_prepare(struct device *dev, pm_message_t state) { int error = 0; - down(&dev->sem); + mutex_lock(&dev->mutex); if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) { pm_dev_dbg(dev, state, "preparing "); @@ -729,7 +729,7 @@ static int device_prepare(struct device *dev, pm_message_t state) suspend_report_result(dev->class->pm->prepare, error); } End: - up(&dev->sem); + mutex_unlock(&dev->mutex); return error; } diff --git a/drivers/block/hd.c b/drivers/block/hd.c index f9d0160..cc71770 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -165,12 +165,12 @@ 
unsigned long read_timer(void) unsigned long t, flags; int i; - spin_lock_irqsave(&i8253_lock, flags); + atomic_spin_lock_irqsave(&i8253_lock, flags); t = jiffies * 11932; outb_p(0, 0x43); i = inb_p(0x40); i |= inb(0x40) << 8; - spin_unlock_irqrestore(&i8253_lock, flags); + atomic_spin_unlock_irqrestore(&i8253_lock, flags); return(t - i); } #endif diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h index bc37032..0fbc78c 100644 --- a/drivers/block/paride/pseudo.h +++ b/drivers/block/paride/pseudo.h @@ -43,7 +43,7 @@ static unsigned long ps_timeout; static int ps_tq_active = 0; static int ps_nice = 0; -static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused))); +static __attribute__((unused)) DEFINE_SPINLOCK(ps_spinlock); static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int); diff --git a/drivers/char/pty.c b/drivers/char/pty.c index 6e6942c..3850a68 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c @@ -52,6 +52,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp) return; tty->link->packet = 0; set_bit(TTY_OTHER_CLOSED, &tty->link->flags); + tty_flip_buffer_push(tty->link); wake_up_interruptible(&tty->link->read_wait); wake_up_interruptible(&tty->link->write_wait); if (tty->driver->subtype == PTY_TYPE_MASTER) { @@ -207,6 +208,7 @@ static int pty_open(struct tty_struct *tty, struct file *filp) clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); set_bit(TTY_THROTTLED, &tty->flags); retval = 0; + tty->low_latency = 1; out: return retval; } diff --git a/drivers/char/random.c b/drivers/char/random.c index 8c74448..91cc9c6 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -623,8 +623,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) preempt_disable(); /* if over the trickle threshold, use only 1 in 4096 samples */ if (input_pool.entropy_count > trickle_thresh && - (__get_cpu_var(trickle_count)++ & 0xfff)) - goto out; + (__get_cpu_var(trickle_count)++ & 0xfff)) { + preempt_enable(); + return; + } + preempt_enable(); sample.jiffies = jiffies; sample.cycles = get_cycles(); @@ -666,8 +669,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) credit_entropy_bits(&input_pool, min_t(int, fls(delta>>1), 11)); } -out: - preempt_enable(); } void add_input_randomness(unsigned int type, unsigned int code, diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index e0d0f8b..d809c4d 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -1197,10 +1197,12 @@ static void rtc_dropped_irq(unsigned long data) spin_unlock_irq(&rtc_lock); +#ifndef CONFIG_PREEMPT_RT if (printk_ratelimit()) { printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq); } +#endif /* Now we have new data */ wake_up_interruptible(&rtc_wait); diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c index 810ee25..fdd8542 100644 --- a/drivers/char/tty_buffer.c +++ b/drivers/char/tty_buffer.c @@ -482,10 +482,14 @@ void tty_flip_buffer_push(struct tty_struct *tty) tty->buf.tail->commit = tty->buf.tail->used; spin_unlock_irqrestore(&tty->buf.lock, flags); +#ifndef CONFIG_PREEMPT_RT if (tty->low_latency) flush_to_ldisc(&tty->buf.work.work); else schedule_delayed_work(&tty->buf.work, 1); +#else + flush_to_ldisc(&tty->buf.work.work); +#endif } EXPORT_SYMBOL(tty_flip_buffer_push); diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 404f4c1..dee3f64 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -2537,7 +2537,7 @@ static struct console vt_console_driver = { .write = vt_console_print, .device = 
vt_console_device, .unblank = unblank_screen, - .flags = CON_PRINTBUFFER, + .flags = CON_PRINTBUFFER | CON_ATOMIC, .index = -1, }; #endif diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 858fe60..24964c1 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -970,7 +970,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) } for (cs = 0; cs < pvt->num_dcsm; cs++) { - reg = K8_DCSB0 + (cs * 4); + reg = K8_DCSM0 + (cs * 4); err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]); if (unlikely(err)) diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 97e656a..16c0e4a 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -762,9 +762,9 @@ static int update_unit(struct device *dev, void *data) struct fw_driver *driver = (struct fw_driver *)dev->driver; if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) { - down(&dev->sem); + mutex_lock(&dev->mutex); driver->update(unit); - up(&dev->sem); + mutex_unlock(&dev->mutex); } return 0; diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index bff0103..fe4fa29 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -593,7 +593,11 @@ static int atk_add_sensor(struct atk_data *data, union acpi_object *obj) sensor->data = data; sensor->id = flags->integer.value; sensor->limit1 = limit1->integer.value; - sensor->limit2 = limit2->integer.value; + if (data->old_interface) + sensor->limit2 = limit2->integer.value; + else + /* The upper limit is expressed as delta from lower limit */ + sensor->limit2 = sensor->limit1 + limit2->integer.value; snprintf(sensor->input_attr_name, ATTR_NAME_SIZE, "%s%d_input", base_name, start + *num); diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c index a92dbb9..ba75bfc 100644 --- a/drivers/hwmon/smsc47m1.c +++ b/drivers/hwmon/smsc47m1.c @@ -86,6 +86,7 @@ superio_exit(void) #define SUPERIO_REG_ACT 0x30 #define SUPERIO_REG_BASE 0x60 #define SUPERIO_REG_DEVID 0x20 +#define SUPERIO_REG_DEVREV 0x21 /* Logical device registers */ @@ -429,6 +430,9 @@ static int __init smsc47m1_find(unsigned short *addr, * The LPC47M292 (device id 0x6B) is somewhat compatible, but it * supports a 3rd fan, and the pin configuration registers are * unfortunately different. + * The LPC47M233 has the same device id (0x6B) but is not compatible. + * We check the high bit of the device revision register to + * differentiate them. 
*/ switch (val) { case 0x51: @@ -448,6 +452,13 @@ static int __init smsc47m1_find(unsigned short *addr, sio_data->type = smsc47m1; break; case 0x6B: + if (superio_inb(SUPERIO_REG_DEVREV) & 0x80) { + pr_debug(DRVNAME ": " + "Found SMSC LPC47M233, unsupported\n"); + superio_exit(); + return -ENODEV; + } + pr_info(DRVNAME ": Found SMSC LPC47M292\n"); sio_data->type = smsc47m2; break; diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c index 1a9cc13..b96f302 100644 --- a/drivers/i2c/chips/tsl2550.c +++ b/drivers/i2c/chips/tsl2550.c @@ -27,7 +27,7 @@ #include #define TSL2550_DRV_NAME "tsl2550" -#define DRIVER_VERSION "1.1.1" +#define DRIVER_VERSION "1.1.2" /* * Defines @@ -189,13 +189,16 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1) u8 r = 128; /* Avoid division by 0 and count 1 cannot be greater than count 0 */ - if (c0 && (c1 <= c0)) - r = c1 * 128 / c0; + if (c1 <= c0) + if (c0) { + r = c1 * 128 / c0; + + /* Calculate LUX */ + lux = ((c0 - c1) * ratio_lut[r]) / 256; + } else + lux = 0; else - return -1; - - /* Calculate LUX */ - lux = ((c0 - c1) * ratio_lut[r]) / 256; + return -EAGAIN; /* LUX range check */ return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux; diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c index e59b6de..b35ede8 100644 --- a/drivers/ide/alim15x3.c +++ b/drivers/ide/alim15x3.c @@ -90,7 +90,7 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio) if (r_clc >= 16) r_clc = 0; } - local_irq_save(flags); + local_irq_save_nort(flags); /* * PIO mode => ATA FIFO on, ATAPI FIFO off @@ -112,7 +112,7 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio) pci_write_config_byte(dev, port, s_clc); pci_write_config_byte(dev, port + unit + 2, (a_clc << 4) | r_clc); - local_irq_restore(flags); + local_irq_restore_nort(flags); } /** @@ -223,7 +223,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev) isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); - local_irq_save(flags); + local_irq_save_nort(flags); if (m5229_revision < 0xC2) { /* @@ -314,7 +314,7 @@ out: } pci_dev_put(north); pci_dev_put(isa_dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); return 0; } @@ -376,7 +376,7 @@ static u8 ali_cable_detect(ide_hwif_t *hwif) unsigned long flags; u8 cbl = ATA_CBL_PATA40, tmpbyte; - local_irq_save(flags); + local_irq_save_nort(flags); if (m5229_revision >= 0xC2) { /* @@ -397,7 +397,7 @@ static u8 ali_cable_detect(ide_hwif_t *hwif) } } - local_irq_restore(flags); + local_irq_restore_nort(flags); return cbl; } diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index 7ce68ef..26c3a83 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c @@ -1302,7 +1302,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif, dma_old = inb(base + 2); - local_irq_save(flags); + local_irq_save_nort(flags); dma_new = dma_old; pci_read_config_byte(dev, hwif->channel ? 
0x4b : 0x43, &masterdma); @@ -1313,7 +1313,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif, if (dma_new != dma_old) outb(dma_new, base + 2); - local_irq_restore(flags); + local_irq_restore_nort(flags); printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, base, base + 7); diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c index 46721c4..b6f114a 100644 --- a/drivers/ide/ide-io-std.c +++ b/drivers/ide/ide-io-std.c @@ -174,7 +174,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned long uninitialized_var(flags); if ((io_32bit & 2) && !mmio) { - local_irq_save(flags); + local_irq_save_nort(flags); ata_vlb_sync(io_ports->nsect_addr); } @@ -185,7 +185,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, insl(data_addr, buf, words); if ((io_32bit & 2) && !mmio) - local_irq_restore(flags); + local_irq_restore_nort(flags); if (((len + 1) & 3) < 2) return; @@ -218,7 +218,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned long uninitialized_var(flags); if ((io_32bit & 2) && !mmio) { - local_irq_save(flags); + local_irq_save_nort(flags); ata_vlb_sync(io_ports->nsect_addr); } @@ -229,7 +229,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, outsl(data_addr, buf, words); if ((io_32bit & 2) && !mmio) - local_irq_restore(flags); + local_irq_restore_nort(flags); if (((len + 1) & 3) < 2) return; diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index db96138..b8b3ab6 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -667,7 +667,7 @@ void ide_timer_expiry (unsigned long data) /* disable_irq_nosync ?? */ disable_irq(hwif->irq); /* local CPU only, as if we were handling an interrupt */ - local_irq_disable(); + local_irq_disable_nort(); if (hwif->polling) { startstop = handler(drive); } else if (drive_is_ready(drive)) { diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 2892b24..b1765dd 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -129,12 +129,12 @@ static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, if ((stat & ATA_BUSY) == 0) break; - local_irq_restore(flags); + local_irq_restore_nort(flags); *rstat = stat; return -EBUSY; } } - local_irq_restore(flags); + local_irq_restore_nort(flags); } /* * Allow status to settle, then read it again. 
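The IDE hunks in this series replace local_irq_save()/local_irq_restore() and
local_irq_disable() with *_nort ("not on RT") variants.  Those helpers are
defined elsewhere in the patch set; the sketch below only illustrates their
assumed shape -- ordinary IRQ toggling on a non-RT build, and effectively
no-ops under CONFIG_PREEMPT_RT, where these code paths run in preemptible
context and must not hard-disable interrupts:

#ifndef CONFIG_PREEMPT_RT
# define local_irq_disable_nort()	local_irq_disable()
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#else
# define local_irq_disable_nort()	do { } while (0)
# define local_irq_save_nort(flags)	do { local_save_flags(flags); } while (0)
# define local_irq_restore_nort(flags)	do { (void)(flags); } while (0)
#endif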
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 1bb106f..596f186 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id) int bswap = 1; /* local CPU only; some systems need this */ - local_irq_save(flags); + local_irq_save_nort(flags); /* read 512 bytes of id info */ hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); - local_irq_restore(flags); + local_irq_restore_nort(flags); drive->dev_flags |= IDE_DFLAG_ID_READ; #ifdef DEBUG diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 75b85a8..bf341a0 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -248,7 +248,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd, offset %= PAGE_SIZE; if (PageHighMem(page)) - local_irq_save(flags); + local_irq_save_nort(flags); buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset; @@ -269,7 +269,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd, kunmap_atomic(buf, KM_BIO_SRC_IRQ); if (PageHighMem(page)) - local_irq_restore(flags); + local_irq_restore_nort(flags); len -= nr_bytes; } @@ -406,7 +406,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, } if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) - local_irq_disable(); + local_irq_disable_nort(); ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index 5122b5a..bcc3d32 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c @@ -1397,9 +1397,9 @@ static int update_pdrv(struct device *dev, void *data) pdrv = container_of(drv, struct hpsb_protocol_driver, driver); if (pdrv->update) { - down(&ud->device.sem); + mutex_lock(&ud->device.mutex); error = pdrv->update(ud); - up(&ud->device.sem); + mutex_unlock(&ud->device.mutex); } if (error) device_release_driver(&ud->device); diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 8c46f22..727776a 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1003,7 +1003,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, port->ib_dev = device; port->port_num = port_num; - init_MUTEX(&port->sm_sem); + semaphore_init(&port->sm_sem); mutex_init(&port->file_mutex); INIT_LIST_HEAD(&port->file_list); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index a0e9753..1dc6446 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -773,7 +773,7 @@ void ipoib_mcast_restart_task(struct work_struct *work) ipoib_mcast_stop_thread(dev, 0); - local_irq_save(flags); + local_irq_save_nort(flags); netif_addr_lock(dev); spin_lock(&priv->lock); @@ -852,7 +852,7 @@ void ipoib_mcast_restart_task(struct work_struct *work) spin_unlock(&priv->lock); netif_addr_unlock(dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); /* We have to cancel outside of the spinlock */ list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index ac11be0..29577a8 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c @@ -20,6 +20,7 @@ #include #include #include +#include #include /* HZ */ #include #include @@ -57,11 +58,11 @@ static unsigned int get_time_pit(void) unsigned long flags; unsigned int count; - spin_lock_irqsave(&i8253_lock, 
flags); + atomic_spin_lock_irqsave(&i8253_lock, flags); outb_p(0x00, 0x43); count = inb_p(0x40); count |= inb_p(0x40) << 8; - spin_unlock_irqrestore(&i8253_lock, flags); + atomic_spin_unlock_irqrestore(&i8253_lock, flags); return count; } @@ -87,12 +88,12 @@ static int gameport_measure_speed(struct gameport *gameport) tx = 1 << 30; for(i = 0; i < 50; i++) { - local_irq_save(flags); + local_irq_save_nort(flags); GET_TIME(t1); for (t = 0; t < 50; t++) gameport_read(gameport); GET_TIME(t2); GET_TIME(t3); - local_irq_restore(flags); + local_irq_restore_nort(flags); udelay(i * 10); if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; } @@ -111,11 +112,11 @@ static int gameport_measure_speed(struct gameport *gameport) tx = 1 << 30; for(i = 0; i < 50; i++) { - local_irq_save(flags); + local_irq_save_nort(flags); rdtscl(t1); for (t = 0; t < 50; t++) gameport_read(gameport); rdtscl(t2); - local_irq_restore(flags); + local_irq_restore_nort(flags); udelay(i * 10); if (t2 - t1 < tx) tx = t2 - t1; } diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 1c0b529..2c52c68 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c @@ -146,11 +146,11 @@ static unsigned int get_time_pit(void) unsigned long flags; unsigned int count; - spin_lock_irqsave(&i8253_lock, flags); + atomic_spin_lock_irqsave(&i8253_lock, flags); outb_p(0x00, 0x43); count = inb_p(0x40); count |= inb_p(0x40) << 8; - spin_unlock_irqrestore(&i8253_lock, flags); + atomic_spin_unlock_irqrestore(&i8253_lock, flags); return count; } diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c index 6f35670..53c2547 100644 --- a/drivers/input/keyboard/hil_kbd.c +++ b/drivers/input/keyboard/hil_kbd.c @@ -277,7 +277,7 @@ static int hil_kbd_connect(struct serio *serio, struct serio_driver *drv) serio_set_drvdata(serio, kbd); kbd->serio = serio; - init_MUTEX_LOCKED(&kbd->sem); + semaphore_init_locked(&kbd->sem); /* Get device info. MLC driver supplies devid/status/etc. 
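/*
 * For orientation (sketch only, not part of the patch): the
 * init_MUTEX()/init_MUTEX_LOCKED() calls being replaced in these input
 * drivers have always been thin wrappers around sema_init(), and the
 * semaphore_init()/semaphore_init_locked() names introduced by this series
 * are assumed to preserve exactly that behaviour while making it obvious
 * that a counting semaphore, not a struct mutex, is being initialised
 * (needs <linux/semaphore.h>; hypothetical helper name):
 */
static inline void example_sem_setup(struct semaphore *sem, int locked)
{
	/* old: init_MUTEX(sem)         new: semaphore_init(sem)        */
	/* old: init_MUTEX_LOCKED(sem)  new: semaphore_init_locked(sem) */
	sema_init(sem, locked ? 0 : 1);
}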
*/ serio->write(serio, 0); diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c index 216a559..a59c141 100644 --- a/drivers/input/misc/hp_sdc_rtc.c +++ b/drivers/input/misc/hp_sdc_rtc.c @@ -104,7 +104,7 @@ static int hp_sdc_rtc_do_read_bbrtc (struct rtc_time *rtctm) t.endidx = 91; t.seq = tseq; t.act.semaphore = &tsem; - init_MUTEX_LOCKED(&tsem); + semaphore_init_locked(&tsem); if (hp_sdc_enqueue_transaction(&t)) return -1; @@ -686,7 +686,7 @@ static int __init hp_sdc_rtc_init(void) return -ENODEV; #endif - init_MUTEX(&i8042tregs); + semaphore_init(&i8042tregs); if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr))) return ret; diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c index 21cb755..5dc0ccd 100644 --- a/drivers/input/misc/pcspkr.c +++ b/drivers/input/misc/pcspkr.c @@ -30,7 +30,7 @@ MODULE_ALIAS("platform:pcspkr"); #include #else #include -static DEFINE_SPINLOCK(i8253_lock); +static DEFINE_ATOMIC_SPINLOCK(i8253_lock); #endif static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) @@ -50,7 +50,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c if (value > 20 && value < 32767) count = PIT_TICK_RATE / value; - spin_lock_irqsave(&i8253_lock, flags); + atomic_spin_lock_irqsave(&i8253_lock, flags); if (count) { /* set command for counter 2, 2 byte write */ @@ -65,7 +65,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c outb(inb_p(0x61) & 0xFC, 0x61); } - spin_unlock_irqrestore(&i8253_lock, flags); + atomic_spin_unlock_irqrestore(&i8253_lock, flags); return 0; } diff --git a/drivers/input/mouse/hil_ptr.c b/drivers/input/mouse/hil_ptr.c index 3263ce0..4fa00a0 100644 --- a/drivers/input/mouse/hil_ptr.c +++ b/drivers/input/mouse/hil_ptr.c @@ -270,7 +270,7 @@ static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver) serio_set_drvdata(serio, ptr); ptr->serio = serio; - init_MUTEX_LOCKED(&ptr->sem); + semaphore_init_locked(&ptr->sem); /* Get device info. MLC driver supplies devid/status/etc. 
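/*
 * Background for the i8253_lock conversions above (hd.c, gameport, analog,
 * pcspkr), sketch only: under CONFIG_PREEMPT_RT an ordinary spinlock_t
 * becomes a sleeping lock, which is not acceptable around raw PIT port I/O.
 * The atomic_spin_lock_* API used in those hunks is introduced elsewhere in
 * this series and is assumed to keep the traditional busy-waiting,
 * interrupts-off semantics on both RT and non-RT kernels.  Typical usage,
 * mirroring get_time_pit() (hypothetical lock name, needs <linux/io.h>):
 */
static DEFINE_ATOMIC_SPINLOCK(example_pit_lock);

static unsigned int example_read_pit_count(void)
{
	unsigned long flags;
	unsigned int count;

	atomic_spin_lock_irqsave(&example_pit_lock, flags);
	outb_p(0x00, 0x43);		/* latch counter 0 */
	count = inb_p(0x40);
	count |= inb_p(0x40) << 8;
	atomic_spin_unlock_irqrestore(&example_pit_lock, flags);

	return count;
}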
*/ serio->write(serio, 0); diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c index 7ba9f2b..17f3641 100644 --- a/drivers/input/serio/hil_mlc.c +++ b/drivers/input/serio/hil_mlc.c @@ -914,15 +914,15 @@ int hil_mlc_register(hil_mlc *mlc) mlc->ostarted = 0; rwlock_init(&mlc->lock); - init_MUTEX(&mlc->osem); + semaphore_init(&mlc->osem); - init_MUTEX(&mlc->isem); + semaphore_init(&mlc->isem); mlc->icount = -1; mlc->imatch = 0; mlc->opercnt = 0; - init_MUTEX_LOCKED(&(mlc->csem)); + semaphore_init(&(mlc->csem)); hil_mlc_clear_di_scratch(mlc); hil_mlc_clear_di_map(mlc, 0); diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c index 1c9410d..14b7ccb 100644 --- a/drivers/input/serio/hp_sdc.c +++ b/drivers/input/serio/hp_sdc.c @@ -1039,7 +1039,7 @@ static int __init hp_sdc_register(void) return hp_sdc.dev_err; } - init_MUTEX_LOCKED(&tq_init_sem); + semaphore_init(&tq_init_sem); tq_init.actidx = 0; tq_init.idx = 1; diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 990e6a7..c3b661a 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -731,10 +731,10 @@ l1oip_socket_thread(void *data) while (!signal_pending(current)) { struct kvec iov = { .iov_base = recvbuf, - .iov_len = sizeof(recvbuf), + .iov_len = recvbuf_size, }; recvlen = kernel_recvmsg(socket, &msg, &iov, 1, - sizeof(recvbuf), 0); + recvbuf_size, 0); if (recvlen > 0) { l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen); } else { diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 23741ce..4d9b203 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -83,7 +83,7 @@ static struct adb_driver *adb_controller; BLOCKING_NOTIFIER_HEAD(adb_client_list); static int adb_got_sleep; static int adb_inited; -static DECLARE_MUTEX(adb_probe_mutex); +static DEFINE_SEMAPHORE(adb_probe_mutex); static int sleepy_trackpad; static int autopoll_devs; int __adb_probe_sync; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 529e2ba..ed10381 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1318,7 +1318,7 @@ static int crypt_iterate_devices(struct dm_target *ti, { struct crypt_config *cc = ti->private; - return fn(ti, cc->dev, cc->start, data); + return fn(ti, cc->dev, cc->start, ti->len, data); } static struct target_type crypt_target = { diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 4e5b843..ebe7381 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -324,12 +324,12 @@ static int delay_iterate_devices(struct dm_target *ti, struct delay_c *dc = ti->private; int ret = 0; - ret = fn(ti, dc->dev_read, dc->start_read, data); + ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data); if (ret) goto out; if (dc->dev_write) - ret = fn(ti, dc->dev_write, dc->start_write, data); + ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data); out: return ret; diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 9184b6d..82f7d6e 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -139,7 +139,7 @@ static int linear_iterate_devices(struct dm_target *ti, { struct linear_c *lc = ti->private; - return fn(ti, lc->dev, lc->start, data); + return fn(ti, lc->dev, lc->start, ti->len, data); } static struct target_type linear_target = { diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index c70604a..6f0d90d 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1453,7 +1453,7 @@ static int multipath_iterate_devices(struct dm_target *ti, 
list_for_each_entry(pg, &m->priority_groups, list) { list_for_each_entry(p, &pg->pgpaths, list) { - ret = fn(ti, p->path.dev, ti->begin, data); + ret = fn(ti, p->path.dev, ti->begin, ti->len, data); if (ret) goto out; } diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index ce8868c..9726577 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -638,6 +638,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) spin_lock_irq(&ms->lock); bio_list_merge(&ms->writes, &requeue); spin_unlock_irq(&ms->lock); + delayed_wake(ms); } /* @@ -1292,7 +1293,7 @@ static int mirror_iterate_devices(struct dm_target *ti, for (i = 0; !ret && i < ms->nr_mirrors; i++) ret = fn(ti, ms->mirror[i].dev, - ms->mirror[i].offset, data); + ms->mirror[i].offset, ti->len, data); return ret; } diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index b240e85..4e0e593 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -320,10 +320,11 @@ static int stripe_iterate_devices(struct dm_target *ti, int ret = 0; unsigned i = 0; - do + do { ret = fn(ti, sc->stripe[i].dev, - sc->stripe[i].physical_start, data); - while (!ret && ++i < sc->stripes); + sc->stripe[i].physical_start, + sc->stripe_width, data); + } while (!ret && ++i < sc->stripes); return ret; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 2cba557..d952b34 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -346,7 +346,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) * If possible, this checks an area of a destination device is valid. */ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, - sector_t start, void *data) + sector_t start, sector_t len, void *data) { struct queue_limits *limits = data; struct block_device *bdev = dev->bdev; @@ -359,7 +359,7 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, if (!dev_size) return 1; - if ((start >= dev_size) || (start + ti->len > dev_size)) { + if ((start >= dev_size) || (start + len > dev_size)) { DMWARN("%s: %s too small for target", dm_device_name(ti->table->md), bdevname(bdev, b)); return 0; @@ -377,11 +377,11 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, return 0; } - if (ti->len & (logical_block_size_sectors - 1)) { + if (len & (logical_block_size_sectors - 1)) { DMWARN("%s: len=%llu not aligned to h/w " "logical block size %hu of %s", dm_device_name(ti->table->md), - (unsigned long long)ti->len, + (unsigned long long)len, limits->logical_block_size, bdevname(bdev, b)); return 0; } @@ -482,7 +482,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? 
l : min(l, r)) int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, - sector_t start, void *data) + sector_t start, sector_t len, void *data) { struct queue_limits *limits = data; struct block_device *bdev = dev->bdev; @@ -830,11 +830,6 @@ unsigned dm_table_get_type(struct dm_table *t) return t->type; } -bool dm_table_bio_based(struct dm_table *t) -{ - return dm_table_get_type(t) == DM_TYPE_BIO_BASED; -} - bool dm_table_request_based(struct dm_table *t) { return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 9acd54a..8a311ea 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2203,16 +2203,6 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table) goto out; } - /* - * It is enought that blk_queue_ordered() is called only once when - * the first bio-based table is bound. - * - * This setting should be moved to alloc_dev() when request-based dm - * supports barrier. - */ - if (!md->map && dm_table_bio_based(table)) - blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL); - __unbind(md); r = __bind(md, table, &limits); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 23278ae..a7663eb 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -61,7 +61,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits); int dm_table_any_busy_target(struct dm_table *t); int dm_table_set_type(struct dm_table *t); unsigned dm_table_get_type(struct dm_table *t); -bool dm_table_bio_based(struct dm_table *t); bool dm_table_request_based(struct dm_table *t); int dm_table_alloc_md_mempools(struct dm_table *t); void dm_table_free_md_mempools(struct dm_table *t); diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c index efb4a6c..9a6307a 100644 --- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c +++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c @@ -20,8 +20,14 @@ #include "tuner-simple.h" #include "stv0297.h" + +/* Can we use the specified front-end? Remember that if we are compiled + * into the kernel we can't call code that's in modules. 
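/*
 * Looking back at the device-mapper hunks above (sketch only): every
 * target's ->iterate_devices() callback now receives the length of the
 * mapped region in addition to its start, so dm-table can validate what the
 * target actually maps -- dm-stripe, for example, now passes
 * sc->stripe_width instead of leaving the check to the whole ti->len.
 * The callback shape implied by those call sites (hypothetical typedef
 * name):
 */
typedef int (*example_iterate_devices_fn)(struct dm_target *ti,
					  struct dm_dev *dev,
					  sector_t start, sector_t len,
					  void *data);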
*/ +#define FE_SUPPORTED(fe) (defined(CONFIG_DVB_##fe) || \ + (defined(CONFIG_DVB_##fe##_MODULE) && defined(MODULE))) + /* lnb control */ -#if defined(CONFIG_DVB_MT312_MODULE) || defined(CONFIG_DVB_STV0299_MODULE) +#if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299) static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { struct flexcop_device *fc = fe->dvb->priv; @@ -49,8 +55,7 @@ static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage } #endif -#if defined(CONFIG_DVB_S5H1420_MODULE) || defined(CONFIG_DVB_STV0299_MODULE) \ - || defined(CONFIG_DVB_MT312_MODULE) +#if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312) static int flexcop_sleep(struct dvb_frontend* fe) { struct flexcop_device *fc = fe->dvb->priv; @@ -61,7 +66,7 @@ static int flexcop_sleep(struct dvb_frontend* fe) #endif /* SkyStar2 DVB-S rev 2.3 */ -#if defined(CONFIG_DVB_MT312_MODULE) +#if FE_SUPPORTED(MT312) static int flexcop_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) { /* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */ @@ -193,10 +198,12 @@ static int skystar2_rev23_attach(struct flexcop_device *fc, } return 0; } +#else +#define skystar2_rev23_attach NULL #endif /* SkyStar2 DVB-S rev 2.6 */ -#if defined(CONFIG_DVB_STV0299_MODULE) +#if FE_SUPPORTED(STV0299) static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ratio) { @@ -321,10 +328,12 @@ static int skystar2_rev26_attach(struct flexcop_device *fc, } return 0; } +#else +#define skystar2_rev26_attach NULL #endif /* SkyStar2 DVB-S rev 2.7 */ -#if defined(CONFIG_DVB_S5H1420_MODULE) +#if FE_SUPPORTED(S5H1420) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_ITD1000) static struct s5h1420_config skystar2_rev2_7_s5h1420_config = { .demod_address = 0x53, .invert = 1, @@ -385,10 +394,12 @@ fail: fc->fc_i2c_adap[0].no_base_addr = 0; return 0; } +#else +#define skystar2_rev27_attach NULL #endif /* SkyStar2 rev 2.8 */ -#if defined(CONFIG_DVB_CX24123_MODULE) +#if FE_SUPPORTED(CX24123) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_CX24113) static struct cx24123_config skystar2_rev2_8_cx24123_config = { .demod_address = 0x55, .dont_use_pll = 1, @@ -433,10 +444,12 @@ static int skystar2_rev28_attach(struct flexcop_device *fc, * IR-receiver (PIC16F818) - but the card has no input for that ??? 
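/*
 * A worked expansion of the FE_SUPPORTED() helper introduced above, for one
 * concrete frontend (illustration only):
 *
 *	FE_SUPPORTED(MT312)
 *	  => (defined(CONFIG_DVB_MT312) || \
 *	      (defined(CONFIG_DVB_MT312_MODULE) && defined(MODULE)))
 *
 * i.e. the attach code is built when the demod driver is built in, or when
 * both the demod driver and this driver are modules.  When the test fails,
 * the corresponding *_attach helper is #defined to NULL instead, so the
 * flexcop_frontends[] table further down needs no per-entry #ifdefs and
 * flexcop_frontend_init() simply skips NULL attach hooks.
 */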
*/ return 1; } +#else +#define skystar2_rev28_attach NULL #endif /* AirStar DVB-T */ -#if defined(CONFIG_DVB_MT352_MODULE) +#if FE_SUPPORTED(MT352) static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend *fe) { static u8 mt352_clock_config[] = { 0x89, 0x18, 0x2d }; @@ -495,10 +508,12 @@ static int airstar_dvbt_attach(struct flexcop_device *fc, } return 0; } +#else +#define airstar_dvbt_attach NULL #endif /* AirStar ATSC 1st generation */ -#if defined(CONFIG_DVB_BCM3510_MODULE) +#if FE_SUPPORTED(BCM3510) static int flexcop_fe_request_firmware(struct dvb_frontend *fe, const struct firmware **fw, char* name) { @@ -517,10 +532,12 @@ static int airstar_atsc1_attach(struct flexcop_device *fc, fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c); return fc->fe != NULL; } +#else +#define airstar_atsc1_attach NULL #endif /* AirStar ATSC 2nd generation */ -#if defined(CONFIG_DVB_NXT200X_MODULE) +#if FE_SUPPORTED(NXT200X) && FE_SUPPORTED(PLL) static struct nxt200x_config samsung_tbmv_config = { .demod_address = 0x0a, }; @@ -535,10 +552,12 @@ static int airstar_atsc2_attach(struct flexcop_device *fc, return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, DVB_PLL_SAMSUNG_TBMV); } +#else +#define airstar_atsc2_attach NULL #endif /* AirStar ATSC 3rd generation */ -#if defined(CONFIG_DVB_LGDT330X_MODULE) +#if FE_SUPPORTED(LGDT330X) static struct lgdt330x_config air2pc_atsc_hd5000_config = { .demod_address = 0x59, .demod_chip = LGDT3303, @@ -556,10 +575,12 @@ static int airstar_atsc3_attach(struct flexcop_device *fc, return !!dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61, TUNER_LG_TDVS_H06XF); } +#else +#define airstar_atsc3_attach NULL #endif /* CableStar2 DVB-C */ -#if defined(CONFIG_DVB_STV0297_MODULE) +#if FE_SUPPORTED(STV0297) static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep) { @@ -698,39 +719,23 @@ static int cablestar2_attach(struct flexcop_device *fc, fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params; return 1; } +#else +#define cablestar2_attach NULL #endif static struct { flexcop_device_type_t type; int (*attach)(struct flexcop_device *, struct i2c_adapter *); } flexcop_frontends[] = { -#if defined(CONFIG_DVB_S5H1420_MODULE) { FC_SKY_REV27, skystar2_rev27_attach }, -#endif -#if defined(CONFIG_DVB_CX24123_MODULE) { FC_SKY_REV28, skystar2_rev28_attach }, -#endif -#if defined(CONFIG_DVB_STV0299_MODULE) { FC_SKY_REV26, skystar2_rev26_attach }, -#endif -#if defined(CONFIG_DVB_MT352_MODULE) { FC_AIR_DVBT, airstar_dvbt_attach }, -#endif -#if defined(CONFIG_DVB_NXT200X_MODULE) { FC_AIR_ATSC2, airstar_atsc2_attach }, -#endif -#if defined(CONFIG_DVB_LGDT330X_MODULE) { FC_AIR_ATSC3, airstar_atsc3_attach }, -#endif -#if defined(CONFIG_DVB_BCM3510_MODULE) { FC_AIR_ATSC1, airstar_atsc1_attach }, -#endif -#if defined(CONFIG_DVB_STV0297_MODULE) { FC_CABLE, cablestar2_attach }, -#endif -#if defined(CONFIG_DVB_MT312_MODULE) { FC_SKY_REV23, skystar2_rev23_attach }, -#endif }; /* try to figure out the frontend */ @@ -738,6 +743,8 @@ int flexcop_frontend_init(struct flexcop_device *fc) { int i; for (i = 0; i < ARRAY_SIZE(flexcop_frontends); i++) { + if (!flexcop_frontends[i].attach) + continue; /* type needs to be set before, because of some workarounds * done based on the probed card type */ fc->dev_type = flexcop_frontends[i].type; diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c index f50ca72..28f0e3f 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.c 
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -101,7 +101,7 @@ struct dvb_frontend_private { struct dvb_device *dvbdev; struct dvb_frontend_parameters parameters; struct dvb_fe_events events; - struct semaphore sem; + struct anon_semaphore sem; struct list_head list_head; wait_queue_head_t wait_queue; struct task_struct *thread; @@ -189,12 +189,12 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe, if (flags & O_NONBLOCK) return -EWOULDBLOCK; - up(&fepriv->sem); + anon_up(&fepriv->sem); ret = wait_event_interruptible (events->wait_queue, events->eventw != events->eventr); - if (down_interruptible (&fepriv->sem)) + if (anon_down_interruptible (&fepriv->sem)) return -ERESTARTSYS; if (ret < 0) @@ -534,7 +534,7 @@ static int dvb_frontend_thread(void *data) set_freezable(); while (1) { - up(&fepriv->sem); /* is locked when we enter the thread... */ + anon_up(&fepriv->sem); /* is locked when we enter the thread... */ restart: timeout = wait_event_interruptible_timeout(fepriv->wait_queue, dvb_frontend_should_wakeup(fe) || kthread_should_stop() @@ -550,7 +550,7 @@ restart: if (try_to_freeze()) goto restart; - if (down_interruptible(&fepriv->sem)) + if (anon_down_interruptible(&fepriv->sem)) break; if (fepriv->reinitialise) { @@ -678,7 +678,7 @@ static void dvb_frontend_stop(struct dvb_frontend *fe) kthread_stop(fepriv->thread); - init_MUTEX (&fepriv->sem); + anon_semaphore_init(&fepriv->sem); fepriv->state = FESTATE_IDLE; /* paranoia check in case a signal arrived */ @@ -747,7 +747,7 @@ static int dvb_frontend_start(struct dvb_frontend *fe) if (signal_pending(current)) return -EINTR; - if (down_interruptible (&fepriv->sem)) + if (anon_down_interruptible (&fepriv->sem)) return -EINTR; fepriv->state = FESTATE_IDLE; @@ -760,7 +760,7 @@ static int dvb_frontend_start(struct dvb_frontend *fe) if (IS_ERR(fe_thread)) { ret = PTR_ERR(fe_thread); printk("dvb_frontend_start: failed to start kthread (%d)\n", ret); - up(&fepriv->sem); + anon_up(&fepriv->sem); return ret; } fepriv->thread = fe_thread; @@ -1372,7 +1372,7 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file, cmd == FE_DISEQC_RECV_SLAVE_REPLY)) return -EPERM; - if (down_interruptible (&fepriv->sem)) + if (anon_down_interruptible (&fepriv->sem)) return -ERESTARTSYS; if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY)) @@ -1382,7 +1382,7 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file, err = dvb_frontend_ioctl_legacy(inode, file, cmd, parg); } - up(&fepriv->sem); + anon_up(&fepriv->sem); return err; } @@ -1909,7 +1909,7 @@ int dvb_register_frontend(struct dvb_adapter* dvb, } fepriv = fe->frontend_priv; - init_MUTEX (&fepriv->sem); + anon_semaphore_init(&fepriv->sem); init_waitqueue_head (&fepriv->wait_queue); init_waitqueue_head (&fepriv->events.wait_queue); mutex_init(&fepriv->events.mtx); diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c index 136c586..12e018b 100644 --- a/drivers/media/dvb/frontends/af9013.c +++ b/drivers/media/dvb/frontends/af9013.c @@ -527,6 +527,10 @@ static int af9013_set_ofdm_params(struct af9013_state *state, u8 i, buf[3] = {0, 0, 0}; *auto_mode = 0; /* set if parameters are requested to auto set */ + /* Try auto-detect transmission parameters in case of AUTO requested or + garbage parameters given by application for compatibility. + MPlayer seems to provide garbage parameters currently. 
*/ + switch (params->transmission_mode) { case TRANSMISSION_MODE_AUTO: *auto_mode = 1; @@ -536,7 +540,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, buf[0] |= (1 << 0); break; default: - return -EINVAL; + deb_info("%s: invalid transmission_mode\n", __func__); + *auto_mode = 1; } switch (params->guard_interval) { @@ -554,7 +559,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, buf[0] |= (3 << 2); break; default: - return -EINVAL; + deb_info("%s: invalid guard_interval\n", __func__); + *auto_mode = 1; } switch (params->hierarchy_information) { @@ -572,7 +578,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, buf[0] |= (3 << 4); break; default: - return -EINVAL; + deb_info("%s: invalid hierarchy_information\n", __func__); + *auto_mode = 1; }; switch (params->constellation) { @@ -587,7 +594,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, buf[1] |= (2 << 6); break; default: - return -EINVAL; + deb_info("%s: invalid constellation\n", __func__); + *auto_mode = 1; } /* Use HP. How and which case we can switch to LP? */ @@ -611,7 +619,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, buf[2] |= (4 << 0); break; default: - return -EINVAL; + deb_info("%s: invalid code_rate_HP\n", __func__); + *auto_mode = 1; } switch (params->code_rate_LP) { @@ -638,7 +647,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, if (params->hierarchy_information == HIERARCHY_AUTO) break; default: - return -EINVAL; + deb_info("%s: invalid code_rate_LP\n", __func__); + *auto_mode = 1; } switch (params->bandwidth) { @@ -651,7 +661,8 @@ static int af9013_set_ofdm_params(struct af9013_state *state, buf[1] |= (2 << 2); break; default: - return -EINVAL; + deb_info("%s: invalid bandwidth\n", __func__); + buf[1] |= (2 << 2); /* cannot auto-detect BW, try 8 MHz */ } /* program */ diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c index fdb4adf..ca6558c 100644 --- a/drivers/media/video/bt8xx/bttv-cards.c +++ b/drivers/media/video/bt8xx/bttv-cards.c @@ -3324,8 +3324,6 @@ void __devinit bttv_init_card1(struct bttv *btv) /* initialization part two -- after registering i2c bus */ void __devinit bttv_init_card2(struct bttv *btv) { - int addr=ADDR_UNSET; - btv->tuner_type = UNSET; if (BTTV_BOARD_UNKNOWN == btv->c.type) { @@ -3470,9 +3468,6 @@ void __devinit bttv_init_card2(struct bttv *btv) btv->pll.pll_current = -1; /* tuner configuration (from card list / autodetect / insmod option) */ - if (ADDR_UNSET != bttv_tvcards[btv->c.type].tuner_addr) - addr = bttv_tvcards[btv->c.type].tuner_addr; - if (UNSET != bttv_tvcards[btv->c.type].tuner_type) if (UNSET == btv->tuner_type) btv->tuner_type = bttv_tvcards[btv->c.type].tuner_type; @@ -3496,40 +3491,6 @@ void __devinit bttv_init_card2(struct bttv *btv) if (UNSET == btv->tuner_type) btv->tuner_type = TUNER_ABSENT; - if (btv->tuner_type != TUNER_ABSENT) { - struct tuner_setup tun_setup; - - /* Load tuner module before issuing tuner config call! 
*/ - if (bttv_tvcards[btv->c.type].has_radio) - v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_RADIO)); - v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); - v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, "tuner", "tuner", - v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD)); - - tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV; - tun_setup.type = btv->tuner_type; - tun_setup.addr = addr; - - if (bttv_tvcards[btv->c.type].has_radio) - tun_setup.mode_mask |= T_RADIO; - - bttv_call_all(btv, tuner, s_type_addr, &tun_setup); - } - - if (btv->tda9887_conf) { - struct v4l2_priv_tun_config tda9887_cfg; - - tda9887_cfg.tuner = TUNER_TDA9887; - tda9887_cfg.priv = &btv->tda9887_conf; - - bttv_call_all(btv, tuner, s_config, &tda9887_cfg); - } - btv->dig = bttv_tvcards[btv->c.type].has_dig_in ? bttv_tvcards[btv->c.type].video_inputs - 1 : UNSET; btv->svhs = bttv_tvcards[btv->c.type].svhs == NO_SVHS ? @@ -3540,15 +3501,15 @@ void __devinit bttv_init_card2(struct bttv *btv) btv->has_remote = remote[btv->c.nr]; if (bttv_tvcards[btv->c.type].has_radio) - btv->has_radio=1; + btv->has_radio = 1; if (bttv_tvcards[btv->c.type].has_remote) - btv->has_remote=1; + btv->has_remote = 1; if (!bttv_tvcards[btv->c.type].no_gpioirq) - btv->gpioirq=1; + btv->gpioirq = 1; if (bttv_tvcards[btv->c.type].volume_gpio) - btv->volume_gpio=bttv_tvcards[btv->c.type].volume_gpio; + btv->volume_gpio = bttv_tvcards[btv->c.type].volume_gpio; if (bttv_tvcards[btv->c.type].audio_mode_gpio) - btv->audio_mode_gpio=bttv_tvcards[btv->c.type].audio_mode_gpio; + btv->audio_mode_gpio = bttv_tvcards[btv->c.type].audio_mode_gpio; if (btv->tuner_type == TUNER_ABSENT) return; /* no tuner or related drivers to load */ @@ -3666,6 +3627,49 @@ no_audio: } +/* initialize the tuner */ +void __devinit bttv_init_tuner(struct bttv *btv) +{ + int addr = ADDR_UNSET; + + if (ADDR_UNSET != bttv_tvcards[btv->c.type].tuner_addr) + addr = bttv_tvcards[btv->c.type].tuner_addr; + + if (btv->tuner_type != TUNER_ABSENT) { + struct tuner_setup tun_setup; + + /* Load tuner module before issuing tuner config call! 
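/*
 * Note on the restructuring here (not part of the patch text): the block
 * that follows is the tuner/tda9887 setup lifted out of bttv_init_card2()
 * above into the new bttv_init_tuner(), and the probe path shown further
 * below now runs the two back to back:
 *
 *	bttv_init_card2(btv);
 *	bttv_init_tuner(btv);
 *
 * so the same tuner initialisation still happens during probe, just after
 * the rest of the card setup rather than in the middle of it.
 */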
*/ + if (bttv_tvcards[btv->c.type].has_radio) + v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, + &btv->c.i2c_adap, "tuner", "tuner", + v4l2_i2c_tuner_addrs(ADDRS_RADIO)); + v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, + &btv->c.i2c_adap, "tuner", "tuner", + v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); + v4l2_i2c_new_probed_subdev(&btv->c.v4l2_dev, + &btv->c.i2c_adap, "tuner", "tuner", + v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD)); + + tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV; + tun_setup.type = btv->tuner_type; + tun_setup.addr = addr; + + if (bttv_tvcards[btv->c.type].has_radio) + tun_setup.mode_mask |= T_RADIO; + + bttv_call_all(btv, tuner, s_type_addr, &tun_setup); + } + + if (btv->tda9887_conf) { + struct v4l2_priv_tun_config tda9887_cfg; + + tda9887_cfg.tuner = TUNER_TDA9887; + tda9887_cfg.priv = &btv->tda9887_conf; + + bttv_call_all(btv, tuner, s_config, &tda9887_cfg); + } +} + /* ----------------------------------------------------------------------- */ static void modtec_eeprom(struct bttv *btv) diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c index d147d29..8cc6dd2 100644 --- a/drivers/media/video/bt8xx/bttv-driver.c +++ b/drivers/media/video/bt8xx/bttv-driver.c @@ -4419,6 +4419,7 @@ static int __devinit bttv_probe(struct pci_dev *dev, /* some card-specific stuff (needs working i2c) */ bttv_init_card2(btv); + bttv_init_tuner(btv); init_irqreg(btv); /* register video4linux + input */ diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h index 3d36daf..3ec2402 100644 --- a/drivers/media/video/bt8xx/bttv.h +++ b/drivers/media/video/bt8xx/bttv.h @@ -283,6 +283,7 @@ extern struct tvcard bttv_tvcards[]; extern void bttv_idcard(struct bttv *btv); extern void bttv_init_card1(struct bttv *btv); extern void bttv_init_card2(struct bttv *btv); +extern void bttv_init_tuner(struct bttv *btv); /* card-specific funtions */ extern void tea5757_set_freq(struct bttv *btv, unsigned short freq); diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c index 428f0c4..e0cf21e 100644 --- a/drivers/media/video/cx23885/cx23885-417.c +++ b/drivers/media/video/cx23885/cx23885-417.c @@ -58,7 +58,8 @@ MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages"); #define dprintk(level, fmt, arg...)\ do { if (v4l_debug >= level) \ - printk(KERN_DEBUG "%s: " fmt, dev->name , ## arg);\ + printk(KERN_DEBUG "%s: " fmt, \ + (dev) ? dev->name : "cx23885[?]", ## arg); \ } while (0) static struct cx23885_tvnorm cx23885_tvnorms[] = { @@ -1677,6 +1678,7 @@ static struct v4l2_file_operations mpeg_fops = { .read = mpeg_read, .poll = mpeg_poll, .mmap = mpeg_mmap, + .ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops mpeg_ioctl_ops = { diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c index ebd24a2..320f1f6 100644 --- a/drivers/media/video/em28xx/em28xx-cards.c +++ b/drivers/media/video/em28xx/em28xx-cards.c @@ -58,8 +58,6 @@ static unsigned int card[] = {[0 ... 
(EM28XX_MAXBOARDS - 1)] = UNSET }; module_param_array(card, int, NULL, 0444); MODULE_PARM_DESC(card, "card type"); -#define MT9V011_VERSION 0x8243 - /* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS */ static unsigned long em28xx_devused; @@ -159,6 +157,20 @@ static struct em28xx_reg_seq evga_indtube_digital[] = { { -1, -1, -1, -1}, }; +/* Pinnacle Hybrid Pro eb1a:2881 */ +static struct em28xx_reg_seq pinnacle_hybrid_pro_analog[] = { + {EM28XX_R08_GPIO, 0xfd, ~EM_GPIO_4, 10}, + { -1, -1, -1, -1}, +}; + +static struct em28xx_reg_seq pinnacle_hybrid_pro_digital[] = { + {EM28XX_R08_GPIO, 0x6e, ~EM_GPIO_4, 10}, + {EM2880_R04_GPO, 0x04, 0xff, 100},/* zl10353 reset */ + {EM2880_R04_GPO, 0x0c, 0xff, 1}, + { -1, -1, -1, -1}, +}; + + /* Callback for the most boards */ static struct em28xx_reg_seq default_tuner_gpio[] = { {EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10}, @@ -205,13 +217,15 @@ static struct em28xx_reg_seq silvercrest_reg_seq[] = { */ struct em28xx_board em28xx_boards[] = { [EM2750_BOARD_UNKNOWN] = { - .name = "Unknown EM2750/EM2751 webcam grabber", + .name = "EM2710/EM2750/EM2751 webcam grabber", .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, - .tuner_type = TUNER_ABSENT, /* This is a webcam */ + .tuner_type = TUNER_ABSENT, + .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE1, .vmux = 0, .amux = EM28XX_AMUX_VIDEO, + .gpio = silvercrest_reg_seq, } }, }, [EM2800_BOARD_UNKNOWN] = { @@ -233,13 +247,15 @@ struct em28xx_board em28xx_boards[] = { [EM2820_BOARD_UNKNOWN] = { .name = "Unknown EM2750/28xx video grabber", .tuner_type = TUNER_ABSENT, + .is_webcam = 1, /* To enable sensor probe */ }, [EM2750_BOARD_DLCW_130] = { /* Beijing Huaqi Information Digital Technology Co., Ltd */ .name = "Huaqi DLCW-130", .valid = EM28XX_BOARD_NOT_VALIDATED, .xclk = EM28XX_XCLK_FREQUENCY_48MHZ, - .tuner_type = TUNER_ABSENT, /* This is a webcam */ + .tuner_type = TUNER_ABSENT, + .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE1, .vmux = 0, @@ -440,7 +456,8 @@ struct em28xx_board em28xx_boards[] = { [EM2820_BOARD_VIDEOLOGY_20K14XUSB] = { .name = "Videology 20K14XUSB USB2.0", .valid = EM28XX_BOARD_NOT_VALIDATED, - .tuner_type = TUNER_ABSENT, /* This is a webcam */ + .tuner_type = TUNER_ABSENT, + .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE1, .vmux = 0, @@ -450,8 +467,7 @@ struct em28xx_board em28xx_boards[] = { [EM2820_BOARD_SILVERCREST_WEBCAM] = { .name = "Silvercrest Webcam 1.3mpix", .tuner_type = TUNER_ABSENT, - .is_27xx = 1, - .decoder = EM28XX_MT9V011, + .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE1, .vmux = 0, @@ -500,7 +516,8 @@ struct em28xx_board em28xx_boards[] = { /* Beijing Huaqi Information Digital Technology Co., Ltd */ .name = "NetGMBH Cam", .valid = EM28XX_BOARD_NOT_VALIDATED, - .tuner_type = TUNER_ABSENT, /* This is a webcam */ + .tuner_type = TUNER_ABSENT, + .is_webcam = 1, .input = { { .type = EM28XX_VMUX_COMPOSITE1, .vmux = 0, @@ -1250,25 +1267,26 @@ struct em28xx_board em28xx_boards[] = { }, [EM2881_BOARD_PINNACLE_HYBRID_PRO] = { .name = "Pinnacle Hybrid Pro", - .valid = EM28XX_BOARD_NOT_VALIDATED, .tuner_type = TUNER_XC2028, .tuner_gpio = default_tuner_gpio, .decoder = EM28XX_TVP5150, + .has_dvb = 1, + .dvb_gpio = pinnacle_hybrid_pro_digital, .input = { { .type = EM28XX_VMUX_TELEVISION, .vmux = TVP5150_COMPOSITE0, .amux = EM28XX_AMUX_VIDEO, - .gpio = default_analog, + .gpio = pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_COMPOSITE1, .vmux = TVP5150_COMPOSITE1, .amux = EM28XX_AMUX_LINE_IN, - .gpio = default_analog, + .gpio = 
pinnacle_hybrid_pro_analog, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, .amux = EM28XX_AMUX_LINE_IN, - .gpio = default_analog, + .gpio = pinnacle_hybrid_pro_analog, } }, }, [EM2882_BOARD_PINNACLE_HYBRID_PRO] = { @@ -1638,6 +1656,7 @@ static struct em28xx_hash_table em28xx_eeprom_hash[] = { {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, {0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028}, {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028}, + {0xb8846b20, EM2881_BOARD_PINNACLE_HYBRID_PRO, TUNER_XC2028}, }; /* I2C devicelist hash table for devices with generic USB IDs */ @@ -1704,6 +1723,32 @@ static inline void em28xx_set_model(struct em28xx *dev) EM28XX_I2C_FREQ_100_KHZ; } +/* FIXME: Should be replaced by a proper mt9m001 driver */ +static int em28xx_initialize_mt9m001(struct em28xx *dev) +{ + int i; + unsigned char regs[][3] = { + { 0x0d, 0x00, 0x01, }, + { 0x0d, 0x00, 0x00, }, + { 0x04, 0x05, 0x00, }, /* hres = 1280 */ + { 0x03, 0x04, 0x00, }, /* vres = 1024 */ + { 0x20, 0x11, 0x00, }, + { 0x06, 0x00, 0x10, }, + { 0x2b, 0x00, 0x24, }, + { 0x2e, 0x00, 0x24, }, + { 0x35, 0x00, 0x24, }, + { 0x2d, 0x00, 0x20, }, + { 0x2c, 0x00, 0x20, }, + { 0x09, 0x0a, 0xd4, }, + { 0x35, 0x00, 0x57, }, + }; + + for (i = 0; i < ARRAY_SIZE(regs); i++) + i2c_master_send(&dev->i2c_client, ®s[i][0], 3); + + return 0; +} + /* HINT method: webcam I2C chips * * This method work for webcams with Micron sensors @@ -1716,9 +1761,6 @@ static int em28xx_hint_sensor(struct em28xx *dev) __be16 version_be; u16 version; - if (dev->model != EM2820_BOARD_UNKNOWN) - return 0; - dev->i2c_client.addr = 0xba >> 1; cmd = 0; i2c_master_send(&dev->i2c_client, &cmd, 1); @@ -1729,16 +1771,38 @@ static int em28xx_hint_sensor(struct em28xx *dev) version = be16_to_cpu(version_be); switch (version) { - case MT9V011_VERSION: + case 0x8243: /* mt9v011 640x480 1.3 Mpix sensor */ dev->model = EM2820_BOARD_SILVERCREST_WEBCAM; sensor_name = "mt9v011"; + dev->em28xx_sensor = EM28XX_MT9V011; + dev->sensor_xres = 640; + dev->sensor_yres = 480; + dev->sensor_xtal = 6300000; + + /* probably means GRGB 16 bit bayer */ + dev->vinmode = 0x0d; + dev->vinctl = 0x00; + + break; + case 0x8431: + dev->model = EM2750_BOARD_UNKNOWN; + sensor_name = "mt9m001"; + dev->em28xx_sensor = EM28XX_MT9M001; + em28xx_initialize_mt9m001(dev); + dev->sensor_xres = 1280; + dev->sensor_yres = 1024; + + /* probably means BGGR 16 bit bayer */ + dev->vinmode = 0x0c; + dev->vinctl = 0x00; + break; default: - printk("Unknown Sensor 0x%04x\n", be16_to_cpu(version)); + printk("Unknown Micron Sensor 0x%04x\n", be16_to_cpu(version)); return -EINVAL; } - em28xx_errdev("Sensor is %s, assuming that webcam is %s\n", + em28xx_errdev("Sensor is %s, using model %s entry.\n", sensor_name, em28xx_boards[dev->model].name); return 0; @@ -1772,10 +1836,7 @@ void em28xx_pre_card_setup(struct em28xx *dev) em28xx_info("chip ID is em2750\n"); break; case CHIP_ID_EM2820: - if (dev->board.is_27xx) - em28xx_info("chip is em2710\n"); - else - em28xx_info("chip ID is em2820\n"); + em28xx_info("chip ID is em2710 or em2820\n"); break; case CHIP_ID_EM2840: em28xx_info("chip ID is em2840\n"); @@ -1929,6 +1990,7 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl) ctl->demod = XC3028_FE_ZARLINK456; break; case EM2880_BOARD_TERRATEC_HYBRID_XS: + case EM2881_BOARD_PINNACLE_HYBRID_PRO: ctl->demod = XC3028_FE_ZARLINK456; break; case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2: @@ -2225,6 +2287,7 @@ void em28xx_card_setup(struct em28xx *dev) em28xx_set_mode() in 
em28xx_pre_card_setup() was a no-op, so make the call now so the analog GPIOs are set properly before probing the i2c bus. */ + em28xx_gpio_set(dev, dev->board.tuner_gpio); em28xx_set_mode(dev, EM28XX_ANALOG_MODE); break; case EM2820_BOARD_SILVERCREST_WEBCAM: @@ -2262,9 +2325,14 @@ void em28xx_card_setup(struct em28xx *dev) v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tvp5150", "tvp5150", tvp5150_addrs); - if (dev->board.decoder == EM28XX_MT9V011) - v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, &dev->i2c_adap, - "mt9v011", "mt9v011", mt9v011_addrs); + if (dev->em28xx_sensor == EM28XX_MT9V011) { + struct v4l2_subdev *sd; + + sd = v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, + &dev->i2c_adap, "mt9v011", "mt9v011", mt9v011_addrs); + v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal); + } + if (dev->board.adecoder == EM28XX_TVAUDIO) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, @@ -2410,7 +2478,19 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev, return errCode; } - em28xx_hint_sensor(dev); + /* + * Default format, used for tvp5150 or saa711x output formats + */ + dev->vinmode = 0x10; + dev->vinctl = 0x11; + + /* + * If the device can be a webcam, seek for a sensor. + * If sensor is not found, then it isn't a webcam. + */ + if (dev->board.is_webcam) + if (em28xx_hint_sensor(dev) < 0) + dev->board.is_webcam = 0; /* Do board specific init and eeprom reading */ em28xx_card_setup(dev); diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c index 079ab4d..5b78e19 100644 --- a/drivers/media/video/em28xx/em28xx-core.c +++ b/drivers/media/video/em28xx/em28xx-core.c @@ -648,28 +648,17 @@ int em28xx_capture_start(struct em28xx *dev, int start) int em28xx_set_outfmt(struct em28xx *dev) { int ret; - int vinmode, vinctl, outfmt; - - outfmt = dev->format->reg; - - if (dev->board.is_27xx) { - vinmode = 0x0d; - vinctl = 0x00; - } else { - vinmode = 0x10; - vinctl = 0x11; - } ret = em28xx_write_reg_bits(dev, EM28XX_R27_OUTFMT, - outfmt | 0x20, 0xff); + dev->format->reg | 0x20, 0xff); if (ret < 0) return ret; - ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, vinmode); + ret = em28xx_write_reg(dev, EM28XX_R10_VINMODE, dev->vinmode); if (ret < 0) return ret; - return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, vinctl); + return em28xx_write_reg(dev, EM28XX_R11_VINCTRL, dev->vinctl); } static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax, @@ -707,10 +696,7 @@ static int em28xx_scaler_set(struct em28xx *dev, u16 h, u16 v) u8 mode; /* the em2800 scaler only supports scaling down to 50% */ - if (dev->board.is_27xx) { - /* FIXME: Don't use the scaler yet */ - mode = 0; - } else if (dev->board.is_em2800) { + if (dev->board.is_em2800) { mode = (v ? 0x20 : 0x00) | (h ? 
0x10 : 0x00); } else { u8 buf[2]; diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c index 3da97c3..cf0ac7f 100644 --- a/drivers/media/video/em28xx/em28xx-dvb.c +++ b/drivers/media/video/em28xx/em28xx-dvb.c @@ -31,6 +31,8 @@ #include "lgdt330x.h" #include "zl10353.h" #include "s5h1409.h" +#include "mt352.h" +#include "mt352_priv.h" /* FIXME */ MODULE_DESCRIPTION("driver for em28xx based DVB cards"); MODULE_AUTHOR("Mauro Carvalho Chehab "); @@ -243,7 +245,7 @@ static struct s5h1409_config em28xx_s5h1409_with_xc3028 = { .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK }; -static struct zl10353_config em28xx_terratec_xs_zl10353_xc3028 = { +static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = { .demod_address = (0x1e >> 1), .no_tuner = 1, .disable_i2c_gate_ctrl = 1, @@ -258,6 +260,41 @@ static struct drx397xD_config em28xx_drx397xD_with_xc3028 = { }; #endif +static int mt352_terratec_xs_init(struct dvb_frontend *fe) +{ + /* Values extracted from a USB trace of the Terratec Windows driver */ + static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x2c }; + static u8 reset[] = { RESET, 0x80 }; + static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 }; + static u8 agc_cfg[] = { AGC_TARGET, 0x28, 0xa0 }; + static u8 input_freq_cfg[] = { INPUT_FREQ_1, 0x31, 0xb8 }; + static u8 rs_err_cfg[] = { RS_ERR_PER_1, 0x00, 0x4d }; + static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 }; + static u8 trl_nom_cfg[] = { TRL_NOMINAL_RATE_1, 0x64, 0x00 }; + static u8 tps_given_cfg[] = { TPS_GIVEN_1, 0x40, 0x80, 0x50 }; + static u8 tuner_go[] = { TUNER_GO, 0x01}; + + mt352_write(fe, clock_config, sizeof(clock_config)); + udelay(200); + mt352_write(fe, reset, sizeof(reset)); + mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg)); + mt352_write(fe, agc_cfg, sizeof(agc_cfg)); + mt352_write(fe, input_freq_cfg, sizeof(input_freq_cfg)); + mt352_write(fe, rs_err_cfg, sizeof(rs_err_cfg)); + mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg)); + mt352_write(fe, trl_nom_cfg, sizeof(trl_nom_cfg)); + mt352_write(fe, tps_given_cfg, sizeof(tps_given_cfg)); + mt352_write(fe, tuner_go, sizeof(tuner_go)); + return 0; +} + +static struct mt352_config terratec_xs_mt352_cfg = { + .demod_address = (0x1e >> 1), + .no_tuner = 1, + .if2 = 45600, + .demod_init = mt352_terratec_xs_init, +}; + /* ------------------------------------------------------------------ */ static int attach_xc3028(u8 addr, struct em28xx *dev) @@ -440,7 +477,6 @@ static int dvb_init(struct em28xx *dev) goto out_free; } break; - case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: case EM2880_BOARD_KWORLD_DVB_310U: case EM2880_BOARD_EMPIRE_DUAL_TV: dvb->frontend = dvb_attach(zl10353_attach, @@ -451,20 +487,28 @@ static int dvb_init(struct em28xx *dev) goto out_free; } break; + case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: + dvb->frontend = dvb_attach(zl10353_attach, + &em28xx_zl10353_xc3028_no_i2c_gate, + &dev->i2c_adap); + if (attach_xc3028(0x61, dev) < 0) { + result = -EINVAL; + goto out_free; + } + break; case EM2880_BOARD_TERRATEC_HYBRID_XS: + case EM2881_BOARD_PINNACLE_HYBRID_PRO: dvb->frontend = dvb_attach(zl10353_attach, - &em28xx_terratec_xs_zl10353_xc3028, + &em28xx_zl10353_xc3028_no_i2c_gate, &dev->i2c_adap); if (dvb->frontend == NULL) { /* This board could have either a zl10353 or a mt352. 
If the chip id isn't for zl10353, try mt352 */ - - /* FIXME: make support for mt352 work */ - printk(KERN_ERR "version of this board with mt352 not " - "currently supported\n"); - result = -EINVAL; - goto out_free; + dvb->frontend = dvb_attach(mt352_attach, + &terratec_xs_mt352_cfg, + &dev->i2c_adap); } + if (attach_xc3028(0x61, dev) < 0) { result = -EINVAL; goto out_free; diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c index 14316c9..ff37b4c 100644 --- a/drivers/media/video/em28xx/em28xx-video.c +++ b/drivers/media/video/em28xx/em28xx-video.c @@ -657,8 +657,8 @@ static void get_scale(struct em28xx *dev, unsigned int width, unsigned int height, unsigned int *hscale, unsigned int *vscale) { - unsigned int maxw = norm_maxw(dev); - unsigned int maxh = norm_maxh(dev); + unsigned int maxw = norm_maxw(dev); + unsigned int maxh = norm_maxh(dev); *hscale = (((unsigned long)maxw) << 12) / width - 4096L; if (*hscale >= 0x4000) @@ -726,11 +726,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, return -EINVAL; } - if (dev->board.is_27xx) { - /* FIXME: This is the only supported fmt */ - width = 640; - height = 480; - } else if (dev->board.is_em2800) { + if (dev->board.is_em2800) { /* the em2800 can only scale down to 50% */ height = height > (3 * maxh / 4) ? maxh : maxh / 2; width = width > (3 * maxw / 4) ? maxw : maxw / 2; @@ -767,12 +763,6 @@ static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc, { struct em28xx_fmt *fmt; - /* FIXME: This is the only supported fmt */ - if (dev->board.is_27xx) { - width = 640; - height = 480; - } - fmt = format_by_fourcc(fourcc); if (!fmt) return -EINVAL; diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h index d90fef4..45bd513 100644 --- a/drivers/media/video/em28xx/em28xx.h +++ b/drivers/media/video/em28xx/em28xx.h @@ -358,10 +358,15 @@ struct em28xx_input { #define INPUT(nr) (&em28xx_boards[dev->model].input[nr]) enum em28xx_decoder { - EM28XX_NODECODER, + EM28XX_NODECODER = 0, EM28XX_TVP5150, EM28XX_SAA711X, +}; + +enum em28xx_sensor { + EM28XX_NOSENSOR = 0, EM28XX_MT9V011, + EM28XX_MT9M001, }; enum em28xx_adecoder { @@ -390,7 +395,7 @@ struct em28xx_board { unsigned int max_range_640_480:1; unsigned int has_dvb:1; unsigned int has_snapshot_button:1; - unsigned int is_27xx:1; + unsigned int is_webcam:1; unsigned int valid:1; unsigned char xclk, i2c_speed; @@ -474,6 +479,14 @@ struct em28xx { struct v4l2_device v4l2_dev; struct em28xx_board board; + /* Webcam specific fields */ + enum em28xx_sensor em28xx_sensor; + int sensor_xres, sensor_yres; + int sensor_xtal; + + /* Vinmode/Vinctl used at the driver */ + int vinmode, vinctl; + unsigned int stream_on:1; /* Locks streams */ unsigned int has_audio_class:1; unsigned int has_alsa_audio:1; @@ -754,17 +767,23 @@ static inline int em28xx_gamma_set(struct em28xx *dev, s32 val) /*FIXME: maxw should be dependent of alt mode */ static inline unsigned int norm_maxw(struct em28xx *dev) { + if (dev->board.is_webcam) + return dev->sensor_xres; + if (dev->board.max_range_640_480) return 640; - else - return 720; + + return 720; } static inline unsigned int norm_maxh(struct em28xx *dev) { + if (dev->board.is_webcam) + return dev->sensor_yres; + if (dev->board.max_range_640_480) return 480; - else - return (dev->norm & V4L2_STD_625_50) ? 576 : 480; + + return (dev->norm & V4L2_STD_625_50) ? 
576 : 480; } #endif diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig index 578dc4f..34f46f2 100644 --- a/drivers/media/video/gspca/Kconfig +++ b/drivers/media/video/gspca/Kconfig @@ -102,6 +102,22 @@ config USB_GSPCA_PAC7311 To compile this driver as a module, choose M here: the module will be called gspca_pac7311. +config USB_GSPCA_SN9C20X + tristate "SN9C20X USB Camera Driver" + depends on VIDEO_V4L2 && USB_GSPCA + help + Say Y here if you want support for cameras based on the + sn9c20x chips (SN9C201 and SN9C202). + + To compile this driver as a module, choose M here: the + module will be called gspca_sn9c20x. + +config USB_GSPCA_SN9C20X_EVDEV + bool "Enable evdev support" + depends on USB_GSPCA_SN9C20X + ---help--- + Say Y here in order to enable evdev support for sn9c20x webcam button. + config USB_GSPCA_SONIXB tristate "SONIX Bayer USB Camera Driver" depends on VIDEO_V4L2 && USB_GSPCA diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile index 8a6643e..f6d3b86 100644 --- a/drivers/media/video/gspca/Makefile +++ b/drivers/media/video/gspca/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_USB_GSPCA_OV519) += gspca_ov519.o obj-$(CONFIG_USB_GSPCA_OV534) += gspca_ov534.o obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o +obj-$(CONFIG_USB_GSPCA_SN9C20X) += gspca_sn9c20x.o obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o obj-$(CONFIG_USB_GSPCA_SONIXJ) += gspca_sonixj.o obj-$(CONFIG_USB_GSPCA_SPCA500) += gspca_spca500.o @@ -35,6 +36,7 @@ gspca_ov519-objs := ov519.o gspca_ov534-objs := ov534.o gspca_pac207-objs := pac207.o gspca_pac7311-objs := pac7311.o +gspca_sn9c20x-objs := sn9c20x.o gspca_sonixb-objs := sonixb.o gspca_sonixj-objs := sonixj.o gspca_spca500-objs := spca500.o diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c index 219cfa6..8d48ea1 100644 --- a/drivers/media/video/gspca/conex.c +++ b/drivers/media/video/gspca/conex.c @@ -846,6 +846,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c index 1e89600..b8561df 100644 --- a/drivers/media/video/gspca/gspca.c +++ b/drivers/media/video/gspca/gspca.c @@ -727,6 +727,74 @@ static int gspca_get_mode(struct gspca_dev *gspca_dev, return -EINVAL; } +#ifdef CONFIG_VIDEO_ADV_DEBUG +static int vidioc_g_register(struct file *file, void *priv, + struct v4l2_dbg_register *reg) +{ + int ret; + struct gspca_dev *gspca_dev = priv; + + if (!gspca_dev->sd_desc->get_chip_ident) + return -EINVAL; + + if (!gspca_dev->sd_desc->get_register) + return -EINVAL; + + if (mutex_lock_interruptible(&gspca_dev->usb_lock)) + return -ERESTARTSYS; + if (gspca_dev->present) + ret = gspca_dev->sd_desc->get_register(gspca_dev, reg); + else + ret = -ENODEV; + mutex_unlock(&gspca_dev->usb_lock); + + return ret; +} + +static int vidioc_s_register(struct file *file, void *priv, + struct v4l2_dbg_register *reg) +{ + int ret; + struct gspca_dev *gspca_dev = priv; + + if (!gspca_dev->sd_desc->get_chip_ident) + return -EINVAL; + + if (!gspca_dev->sd_desc->set_register) + return -EINVAL; + + if (mutex_lock_interruptible(&gspca_dev->usb_lock)) + return -ERESTARTSYS; + if (gspca_dev->present) + ret = 
gspca_dev->sd_desc->set_register(gspca_dev, reg); + else + ret = -ENODEV; + mutex_unlock(&gspca_dev->usb_lock); + + return ret; +} +#endif + +static int vidioc_g_chip_ident(struct file *file, void *priv, + struct v4l2_dbg_chip_ident *chip) +{ + int ret; + struct gspca_dev *gspca_dev = priv; + + if (!gspca_dev->sd_desc->get_chip_ident) + return -EINVAL; + + if (mutex_lock_interruptible(&gspca_dev->usb_lock)) + return -ERESTARTSYS; + if (gspca_dev->present) + ret = gspca_dev->sd_desc->get_chip_ident(gspca_dev, chip); + else + ret = -ENODEV; + mutex_unlock(&gspca_dev->usb_lock); + + return ret; +} + static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *fmtdesc) { @@ -1883,6 +1951,11 @@ static const struct v4l2_ioctl_ops dev_ioctl_ops = { .vidioc_s_parm = vidioc_s_parm, .vidioc_s_std = vidioc_s_std, .vidioc_enum_framesizes = vidioc_enum_framesizes, +#ifdef CONFIG_VIDEO_ADV_DEBUG + .vidioc_g_register = vidioc_g_register, + .vidioc_s_register = vidioc_s_register, +#endif + .vidioc_g_chip_ident = vidioc_g_chip_ident, #ifdef CONFIG_VIDEO_V4L1_COMPAT .vidiocgmbuf = vidiocgmbuf, #endif diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h index bd1faff..46c4eff 100644 --- a/drivers/media/video/gspca/gspca.h +++ b/drivers/media/video/gspca/gspca.h @@ -69,6 +69,10 @@ typedef void (*cam_v_op) (struct gspca_dev *); typedef int (*cam_cf_op) (struct gspca_dev *, const struct usb_device_id *); typedef int (*cam_jpg_op) (struct gspca_dev *, struct v4l2_jpegcompression *); +typedef int (*cam_reg_op) (struct gspca_dev *, + struct v4l2_dbg_register *); +typedef int (*cam_ident_op) (struct gspca_dev *, + struct v4l2_dbg_chip_ident *); typedef int (*cam_streamparm_op) (struct gspca_dev *, struct v4l2_streamparm *); typedef int (*cam_qmnu_op) (struct gspca_dev *, @@ -105,6 +109,11 @@ struct sd_desc { cam_qmnu_op querymenu; cam_streamparm_op get_streamparm; cam_streamparm_op set_streamparm; +#ifdef CONFIG_VIDEO_ADV_DEBUG + cam_reg_op set_register; + cam_reg_op get_register; +#endif + cam_ident_op get_chip_ident; }; /* packet types when moving from iso buf to frame buf */ diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c index 191bcd7..0163903 100644 --- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c +++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c @@ -476,9 +476,6 @@ static int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val) err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); if (err < 0) return err; - err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); - if (err < 0) - return err; err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1); if (err < 0) @@ -524,9 +521,6 @@ static int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val) err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); if (err < 0) return err; - err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); - if (err < 0) - return err; err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1); if (err < 0) diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c index 75e8d14..de769ca 100644 --- a/drivers/media/video/gspca/mars.c +++ b/drivers/media/video/gspca/mars.c @@ -201,6 +201,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x21); /* JPEG 422 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff 
--git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c new file mode 100644 index 0000000..fcfbbd3 --- /dev/null +++ b/drivers/media/video/gspca/sn9c20x.c @@ -0,0 +1,2434 @@ +/* + * Sonix sn9c201 sn9c202 library + * Copyright (C) 2008-2009 microdia project + * Copyright (C) 2009 Brian Johnson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV +#include +#include +#include +#include +#endif + +#include "gspca.h" +#include "jpeg.h" + +#include + +MODULE_AUTHOR("Brian Johnson , " + "microdia project "); +MODULE_DESCRIPTION("GSPCA/SN9C20X USB Camera Driver"); +MODULE_LICENSE("GPL"); + +#define MODULE_NAME "sn9c20x" + +#define MODE_RAW 0x10 +#define MODE_JPEG 0x20 +#define MODE_SXGA 0x80 + +#define SENSOR_OV9650 0 +#define SENSOR_OV9655 1 +#define SENSOR_SOI968 2 +#define SENSOR_OV7660 3 +#define SENSOR_OV7670 4 +#define SENSOR_MT9V011 5 +#define SENSOR_MT9V111 6 +#define SENSOR_MT9V112 7 +#define SENSOR_MT9M001 8 +#define SENSOR_MT9M111 9 +#define SENSOR_HV7131R 10 +#define SENSOR_MT9VPRB 20 + +/* specific webcam descriptor */ +struct sd { + struct gspca_dev gspca_dev; + +#define MIN_AVG_LUM 80 +#define MAX_AVG_LUM 130 + atomic_t avg_lum; + u8 old_step; + u8 older_step; + u8 exposure_step; + + u8 brightness; + u8 contrast; + u8 saturation; + s16 hue; + u8 gamma; + u8 red; + u8 blue; + + u8 hflip; + u8 vflip; + u8 gain; + u16 exposure; + u8 auto_exposure; + + u8 i2c_addr; + u8 sensor; + u8 hstart; + u8 vstart; + + u8 *jpeg_hdr; + u8 quality; + +#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV + struct input_dev *input_dev; + u8 input_gpio; + struct task_struct *input_task; +#endif +}; + +static int sd_setbrightness(struct gspca_dev *gspca_dev, s32 val); +static int sd_getbrightness(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setcontrast(struct gspca_dev *gspca_dev, s32 val); +static int sd_getcontrast(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setsaturation(struct gspca_dev *gspca_dev, s32 val); +static int sd_getsaturation(struct gspca_dev *gspca_dev, s32 *val); +static int sd_sethue(struct gspca_dev *gspca_dev, s32 val); +static int sd_gethue(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setgamma(struct gspca_dev *gspca_dev, s32 val); +static int sd_getgamma(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setredbalance(struct gspca_dev *gspca_dev, s32 val); +static int sd_getredbalance(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setbluebalance(struct gspca_dev *gspca_dev, s32 val); +static int sd_getbluebalance(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setvflip(struct gspca_dev *gspca_dev, s32 val); +static int sd_getvflip(struct gspca_dev *gspca_dev, s32 *val); +static int sd_sethflip(struct gspca_dev *gspca_dev, s32 val); +static int sd_gethflip(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setgain(struct 
gspca_dev *gspca_dev, s32 val); +static int sd_getgain(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setexposure(struct gspca_dev *gspca_dev, s32 val); +static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val); +static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val); +static int sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val); + +static struct ctrl sd_ctrls[] = { + { +#define BRIGHTNESS_IDX 0 + { + .id = V4L2_CID_BRIGHTNESS, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Brightness", + .minimum = 0, + .maximum = 0xff, + .step = 1, +#define BRIGHTNESS_DEFAULT 0x7f + .default_value = BRIGHTNESS_DEFAULT, + }, + .set = sd_setbrightness, + .get = sd_getbrightness, + }, + { +#define CONTRAST_IDX 1 + { + .id = V4L2_CID_CONTRAST, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Contrast", + .minimum = 0, + .maximum = 0xff, + .step = 1, +#define CONTRAST_DEFAULT 0x7f + .default_value = CONTRAST_DEFAULT, + }, + .set = sd_setcontrast, + .get = sd_getcontrast, + }, + { +#define SATURATION_IDX 2 + { + .id = V4L2_CID_SATURATION, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Saturation", + .minimum = 0, + .maximum = 0xff, + .step = 1, +#define SATURATION_DEFAULT 0x7f + .default_value = SATURATION_DEFAULT, + }, + .set = sd_setsaturation, + .get = sd_getsaturation, + }, + { +#define HUE_IDX 3 + { + .id = V4L2_CID_HUE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Hue", + .minimum = -180, + .maximum = 180, + .step = 1, +#define HUE_DEFAULT 0 + .default_value = HUE_DEFAULT, + }, + .set = sd_sethue, + .get = sd_gethue, + }, + { +#define GAMMA_IDX 4 + { + .id = V4L2_CID_GAMMA, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Gamma", + .minimum = 0, + .maximum = 0xff, + .step = 1, +#define GAMMA_DEFAULT 0x10 + .default_value = GAMMA_DEFAULT, + }, + .set = sd_setgamma, + .get = sd_getgamma, + }, + { +#define BLUE_IDX 5 + { + .id = V4L2_CID_BLUE_BALANCE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Blue Balance", + .minimum = 0, + .maximum = 0x7f, + .step = 1, +#define BLUE_DEFAULT 0x28 + .default_value = BLUE_DEFAULT, + }, + .set = sd_setbluebalance, + .get = sd_getbluebalance, + }, + { +#define RED_IDX 6 + { + .id = V4L2_CID_RED_BALANCE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Red Balance", + .minimum = 0, + .maximum = 0x7f, + .step = 1, +#define RED_DEFAULT 0x28 + .default_value = RED_DEFAULT, + }, + .set = sd_setredbalance, + .get = sd_getredbalance, + }, + { +#define HFLIP_IDX 7 + { + .id = V4L2_CID_HFLIP, + .type = V4L2_CTRL_TYPE_BOOLEAN, + .name = "Horizontal Flip", + .minimum = 0, + .maximum = 1, + .step = 1, +#define HFLIP_DEFAULT 0 + .default_value = HFLIP_DEFAULT, + }, + .set = sd_sethflip, + .get = sd_gethflip, + }, + { +#define VFLIP_IDX 8 + { + .id = V4L2_CID_VFLIP, + .type = V4L2_CTRL_TYPE_BOOLEAN, + .name = "Vertical Flip", + .minimum = 0, + .maximum = 1, + .step = 1, +#define VFLIP_DEFAULT 0 + .default_value = VFLIP_DEFAULT, + }, + .set = sd_setvflip, + .get = sd_getvflip, + }, + { +#define EXPOSURE_IDX 9 + { + .id = V4L2_CID_EXPOSURE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Exposure", + .minimum = 0, + .maximum = 0x1780, + .step = 1, +#define EXPOSURE_DEFAULT 0x33 + .default_value = EXPOSURE_DEFAULT, + }, + .set = sd_setexposure, + .get = sd_getexposure, + }, + { +#define GAIN_IDX 10 + { + .id = V4L2_CID_GAIN, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Gain", + .minimum = 0, + .maximum = 28, + .step = 1, +#define GAIN_DEFAULT 0x00 + .default_value = GAIN_DEFAULT, + }, + .set = sd_setgain, + .get = sd_getgain, + }, + { +#define AUTOGAIN_IDX 11 + { + .id = 
V4L2_CID_AUTOGAIN, + .type = V4L2_CTRL_TYPE_BOOLEAN, + .name = "Auto Exposure", + .minimum = 0, + .maximum = 1, + .step = 1, +#define AUTO_EXPOSURE_DEFAULT 1 + .default_value = AUTO_EXPOSURE_DEFAULT, + }, + .set = sd_setautoexposure, + .get = sd_getautoexposure, + }, +}; + +static const struct v4l2_pix_format vga_mode[] = { + {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 240, + .sizeimage = 240 * 120, + .colorspace = V4L2_COLORSPACE_JPEG, + .priv = 0 | MODE_JPEG}, + {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 160, + .sizeimage = 160 * 120, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 0 | MODE_RAW}, + {160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, + .bytesperline = 240, + .sizeimage = 240 * 120, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 0}, + {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 480, + .sizeimage = 480 * 240 , + .colorspace = V4L2_COLORSPACE_JPEG, + .priv = 1 | MODE_JPEG}, + {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 320, + .sizeimage = 320 * 240 , + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 1 | MODE_RAW}, + {320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, + .bytesperline = 480, + .sizeimage = 480 * 240 , + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 1}, + {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 960, + .sizeimage = 960 * 480, + .colorspace = V4L2_COLORSPACE_JPEG, + .priv = 2 | MODE_JPEG}, + {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 640, + .sizeimage = 640 * 480, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 2 | MODE_RAW}, + {640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, + .bytesperline = 960, + .sizeimage = 960 * 480, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 2}, +}; + +static const struct v4l2_pix_format sxga_mode[] = { + {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 240, + .sizeimage = 240 * 120, + .colorspace = V4L2_COLORSPACE_JPEG, + .priv = 0 | MODE_JPEG}, + {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 160, + .sizeimage = 160 * 120, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 0 | MODE_RAW}, + {160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, + .bytesperline = 240, + .sizeimage = 240 * 120, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 0}, + {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 480, + .sizeimage = 480 * 240 , + .colorspace = V4L2_COLORSPACE_JPEG, + .priv = 1 | MODE_JPEG}, + {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 320, + .sizeimage = 320 * 240 , + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 1 | MODE_RAW}, + {320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, + .bytesperline = 480, + .sizeimage = 480 * 240 , + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 1}, + {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 960, + .sizeimage = 960 * 480, + .colorspace = V4L2_COLORSPACE_JPEG, + .priv = 2 | MODE_JPEG}, + {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 640, + .sizeimage = 640 * 480, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 2 | MODE_RAW}, + {640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, + .bytesperline = 960, + .sizeimage = 960 * 480, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 2}, + {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, + .bytesperline = 1280, + .sizeimage = (1280 * 1024) + 64, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 3 | MODE_RAW | MODE_SXGA}, +}; + +static const int hsv_red_x[] = { + 41, 44, 46, 48, 50, 52, 54, 56, + 
58, 60, 62, 64, 66, 68, 70, 72, + 74, 76, 78, 80, 81, 83, 85, 87, + 88, 90, 92, 93, 95, 97, 98, 100, + 101, 102, 104, 105, 107, 108, 109, 110, + 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 123, 124, 125, 125, + 126, 127, 127, 128, 128, 129, 129, 129, + 130, 130, 130, 130, 131, 131, 131, 131, + 131, 131, 131, 131, 130, 130, 130, 130, + 129, 129, 129, 128, 128, 127, 127, 126, + 125, 125, 124, 123, 122, 122, 121, 120, + 119, 118, 117, 116, 115, 114, 112, 111, + 110, 109, 107, 106, 105, 103, 102, 101, + 99, 98, 96, 94, 93, 91, 90, 88, + 86, 84, 83, 81, 79, 77, 75, 74, + 72, 70, 68, 66, 64, 62, 60, 58, + 56, 54, 52, 49, 47, 45, 43, 41, + 39, 36, 34, 32, 30, 28, 25, 23, + 21, 19, 16, 14, 12, 9, 7, 5, + 3, 0, -1, -3, -6, -8, -10, -12, + -15, -17, -19, -22, -24, -26, -28, -30, + -33, -35, -37, -39, -41, -44, -46, -48, + -50, -52, -54, -56, -58, -60, -62, -64, + -66, -68, -70, -72, -74, -76, -78, -80, + -81, -83, -85, -87, -88, -90, -92, -93, + -95, -97, -98, -100, -101, -102, -104, -105, + -107, -108, -109, -110, -112, -113, -114, -115, + -116, -117, -118, -119, -120, -121, -122, -123, + -123, -124, -125, -125, -126, -127, -127, -128, + -128, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, + -128, -127, -127, -126, -125, -125, -124, -123, + -122, -122, -121, -120, -119, -118, -117, -116, + -115, -114, -112, -111, -110, -109, -107, -106, + -105, -103, -102, -101, -99, -98, -96, -94, + -93, -91, -90, -88, -86, -84, -83, -81, + -79, -77, -75, -74, -72, -70, -68, -66, + -64, -62, -60, -58, -56, -54, -52, -49, + -47, -45, -43, -41, -39, -36, -34, -32, + -30, -28, -25, -23, -21, -19, -16, -14, + -12, -9, -7, -5, -3, 0, 1, 3, + 6, 8, 10, 12, 15, 17, 19, 22, + 24, 26, 28, 30, 33, 35, 37, 39, 41 +}; + +static const int hsv_red_y[] = { + 82, 80, 78, 76, 74, 73, 71, 69, + 67, 65, 63, 61, 58, 56, 54, 52, + 50, 48, 46, 44, 41, 39, 37, 35, + 32, 30, 28, 26, 23, 21, 19, 16, + 14, 12, 10, 7, 5, 3, 0, -1, + -3, -6, -8, -10, -13, -15, -17, -19, + -22, -24, -26, -29, -31, -33, -35, -38, + -40, -42, -44, -46, -48, -51, -53, -55, + -57, -59, -61, -63, -65, -67, -69, -71, + -73, -75, -77, -79, -81, -82, -84, -86, + -88, -89, -91, -93, -94, -96, -98, -99, + -101, -102, -104, -105, -106, -108, -109, -110, + -112, -113, -114, -115, -116, -117, -119, -120, + -120, -121, -122, -123, -124, -125, -126, -126, + -127, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, + -127, -127, -126, -125, -125, -124, -123, -122, + -121, -120, -119, -118, -117, -116, -115, -114, + -113, -111, -110, -109, -107, -106, -105, -103, + -102, -100, -99, -97, -96, -94, -92, -91, + -89, -87, -85, -84, -82, -80, -78, -76, + -74, -73, -71, -69, -67, -65, -63, -61, + -58, -56, -54, -52, -50, -48, -46, -44, + -41, -39, -37, -35, -32, -30, -28, -26, + -23, -21, -19, -16, -14, -12, -10, -7, + -5, -3, 0, 1, 3, 6, 8, 10, + 13, 15, 17, 19, 22, 24, 26, 29, + 31, 33, 35, 38, 40, 42, 44, 46, + 48, 51, 53, 55, 57, 59, 61, 63, + 65, 67, 69, 71, 73, 75, 77, 79, + 81, 82, 84, 86, 88, 89, 91, 93, + 94, 96, 98, 99, 101, 102, 104, 105, + 106, 108, 109, 110, 112, 113, 114, 115, + 116, 117, 119, 120, 120, 121, 122, 123, + 124, 125, 126, 126, 127, 128, 128, 129, + 129, 130, 130, 131, 131, 131, 131, 132, + 132, 132, 132, 132, 132, 132, 132, 132, + 132, 132, 132, 131, 131, 131, 130, 130, + 130, 129, 129, 128, 127, 127, 126, 
125, + 125, 124, 123, 122, 121, 120, 119, 118, + 117, 116, 115, 114, 113, 111, 110, 109, + 107, 106, 105, 103, 102, 100, 99, 97, + 96, 94, 92, 91, 89, 87, 85, 84, 82 +}; + +static const int hsv_green_x[] = { + -124, -124, -125, -125, -125, -125, -125, -125, + -125, -126, -126, -125, -125, -125, -125, -125, + -125, -124, -124, -124, -123, -123, -122, -122, + -121, -121, -120, -120, -119, -118, -117, -117, + -116, -115, -114, -113, -112, -111, -110, -109, + -108, -107, -105, -104, -103, -102, -100, -99, + -98, -96, -95, -93, -92, -91, -89, -87, + -86, -84, -83, -81, -79, -77, -76, -74, + -72, -70, -69, -67, -65, -63, -61, -59, + -57, -55, -53, -51, -49, -47, -45, -43, + -41, -39, -37, -35, -33, -30, -28, -26, + -24, -22, -20, -18, -15, -13, -11, -9, + -7, -4, -2, 0, 1, 3, 6, 8, + 10, 12, 14, 17, 19, 21, 23, 25, + 27, 29, 32, 34, 36, 38, 40, 42, + 44, 46, 48, 50, 52, 54, 56, 58, + 60, 62, 64, 66, 68, 70, 71, 73, + 75, 77, 78, 80, 82, 83, 85, 87, + 88, 90, 91, 93, 94, 96, 97, 98, + 100, 101, 102, 104, 105, 106, 107, 108, + 109, 111, 112, 113, 113, 114, 115, 116, + 117, 118, 118, 119, 120, 120, 121, 122, + 122, 123, 123, 124, 124, 124, 125, 125, + 125, 125, 125, 125, 125, 126, 126, 125, + 125, 125, 125, 125, 125, 124, 124, 124, + 123, 123, 122, 122, 121, 121, 120, 120, + 119, 118, 117, 117, 116, 115, 114, 113, + 112, 111, 110, 109, 108, 107, 105, 104, + 103, 102, 100, 99, 98, 96, 95, 93, + 92, 91, 89, 87, 86, 84, 83, 81, + 79, 77, 76, 74, 72, 70, 69, 67, + 65, 63, 61, 59, 57, 55, 53, 51, + 49, 47, 45, 43, 41, 39, 37, 35, + 33, 30, 28, 26, 24, 22, 20, 18, + 15, 13, 11, 9, 7, 4, 2, 0, + -1, -3, -6, -8, -10, -12, -14, -17, + -19, -21, -23, -25, -27, -29, -32, -34, + -36, -38, -40, -42, -44, -46, -48, -50, + -52, -54, -56, -58, -60, -62, -64, -66, + -68, -70, -71, -73, -75, -77, -78, -80, + -82, -83, -85, -87, -88, -90, -91, -93, + -94, -96, -97, -98, -100, -101, -102, -104, + -105, -106, -107, -108, -109, -111, -112, -113, + -113, -114, -115, -116, -117, -118, -118, -119, + -120, -120, -121, -122, -122, -123, -123, -124, -124 +}; + +static const int hsv_green_y[] = { + -100, -99, -98, -97, -95, -94, -93, -91, + -90, -89, -87, -86, -84, -83, -81, -80, + -78, -76, -75, -73, -71, -70, -68, -66, + -64, -63, -61, -59, -57, -55, -53, -51, + -49, -48, -46, -44, -42, -40, -38, -36, + -34, -32, -30, -27, -25, -23, -21, -19, + -17, -15, -13, -11, -9, -7, -4, -2, + 0, 1, 3, 5, 7, 9, 11, 14, + 16, 18, 20, 22, 24, 26, 28, 30, + 32, 34, 36, 38, 40, 42, 44, 46, + 48, 50, 52, 54, 56, 58, 59, 61, + 63, 65, 67, 68, 70, 72, 74, 75, + 77, 78, 80, 82, 83, 85, 86, 88, + 89, 90, 92, 93, 95, 96, 97, 98, + 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 112, 113, 114, + 115, 115, 116, 116, 117, 117, 118, 118, + 119, 119, 119, 120, 120, 120, 120, 120, + 121, 121, 121, 121, 121, 121, 120, 120, + 120, 120, 120, 119, 119, 119, 118, 118, + 117, 117, 116, 116, 115, 114, 114, 113, + 112, 111, 111, 110, 109, 108, 107, 106, + 105, 104, 103, 102, 100, 99, 98, 97, + 95, 94, 93, 91, 90, 89, 87, 86, + 84, 83, 81, 80, 78, 76, 75, 73, + 71, 70, 68, 66, 64, 63, 61, 59, + 57, 55, 53, 51, 49, 48, 46, 44, + 42, 40, 38, 36, 34, 32, 30, 27, + 25, 23, 21, 19, 17, 15, 13, 11, + 9, 7, 4, 2, 0, -1, -3, -5, + -7, -9, -11, -14, -16, -18, -20, -22, + -24, -26, -28, -30, -32, -34, -36, -38, + -40, -42, -44, -46, -48, -50, -52, -54, + -56, -58, -59, -61, -63, -65, -67, -68, + -70, -72, -74, -75, -77, -78, -80, -82, + -83, -85, -86, -88, -89, -90, -92, -93, + -95, -96, -97, -98, -100, -101, -102, -103, + -104, -105, -106, -107, -108, 
-109, -110, -111, + -112, -112, -113, -114, -115, -115, -116, -116, + -117, -117, -118, -118, -119, -119, -119, -120, + -120, -120, -120, -120, -121, -121, -121, -121, + -121, -121, -120, -120, -120, -120, -120, -119, + -119, -119, -118, -118, -117, -117, -116, -116, + -115, -114, -114, -113, -112, -111, -111, -110, + -109, -108, -107, -106, -105, -104, -103, -102, -100 +}; + +static const int hsv_blue_x[] = { + 112, 113, 114, 114, 115, 116, 117, 117, + 118, 118, 119, 119, 120, 120, 120, 121, + 121, 121, 122, 122, 122, 122, 122, 122, + 122, 122, 122, 122, 122, 122, 121, 121, + 121, 120, 120, 120, 119, 119, 118, 118, + 117, 116, 116, 115, 114, 113, 113, 112, + 111, 110, 109, 108, 107, 106, 105, 104, + 103, 102, 100, 99, 98, 97, 95, 94, + 93, 91, 90, 88, 87, 85, 84, 82, + 80, 79, 77, 76, 74, 72, 70, 69, + 67, 65, 63, 61, 60, 58, 56, 54, + 52, 50, 48, 46, 44, 42, 40, 38, + 36, 34, 32, 30, 28, 26, 24, 22, + 19, 17, 15, 13, 11, 9, 7, 5, + 2, 0, -1, -3, -5, -7, -9, -12, + -14, -16, -18, -20, -22, -24, -26, -28, + -31, -33, -35, -37, -39, -41, -43, -45, + -47, -49, -51, -53, -54, -56, -58, -60, + -62, -64, -66, -67, -69, -71, -73, -74, + -76, -78, -79, -81, -83, -84, -86, -87, + -89, -90, -92, -93, -94, -96, -97, -98, + -99, -101, -102, -103, -104, -105, -106, -107, + -108, -109, -110, -111, -112, -113, -114, -114, + -115, -116, -117, -117, -118, -118, -119, -119, + -120, -120, -120, -121, -121, -121, -122, -122, + -122, -122, -122, -122, -122, -122, -122, -122, + -122, -122, -121, -121, -121, -120, -120, -120, + -119, -119, -118, -118, -117, -116, -116, -115, + -114, -113, -113, -112, -111, -110, -109, -108, + -107, -106, -105, -104, -103, -102, -100, -99, + -98, -97, -95, -94, -93, -91, -90, -88, + -87, -85, -84, -82, -80, -79, -77, -76, + -74, -72, -70, -69, -67, -65, -63, -61, + -60, -58, -56, -54, -52, -50, -48, -46, + -44, -42, -40, -38, -36, -34, -32, -30, + -28, -26, -24, -22, -19, -17, -15, -13, + -11, -9, -7, -5, -2, 0, 1, 3, + 5, 7, 9, 12, 14, 16, 18, 20, + 22, 24, 26, 28, 31, 33, 35, 37, + 39, 41, 43, 45, 47, 49, 51, 53, + 54, 56, 58, 60, 62, 64, 66, 67, + 69, 71, 73, 74, 76, 78, 79, 81, + 83, 84, 86, 87, 89, 90, 92, 93, + 94, 96, 97, 98, 99, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112 +}; + +static const int hsv_blue_y[] = { + -11, -13, -15, -17, -19, -21, -23, -25, + -27, -29, -31, -33, -35, -37, -39, -41, + -43, -45, -46, -48, -50, -52, -54, -55, + -57, -59, -61, -62, -64, -66, -67, -69, + -71, -72, -74, -75, -77, -78, -80, -81, + -83, -84, -86, -87, -88, -90, -91, -92, + -93, -95, -96, -97, -98, -99, -100, -101, + -102, -103, -104, -105, -106, -106, -107, -108, + -109, -109, -110, -111, -111, -112, -112, -113, + -113, -114, -114, -114, -115, -115, -115, -115, + -116, -116, -116, -116, -116, -116, -116, -116, + -116, -115, -115, -115, -115, -114, -114, -114, + -113, -113, -112, -112, -111, -111, -110, -110, + -109, -108, -108, -107, -106, -105, -104, -103, + -102, -101, -100, -99, -98, -97, -96, -95, + -94, -93, -91, -90, -89, -88, -86, -85, + -84, -82, -81, -79, -78, -76, -75, -73, + -71, -70, -68, -67, -65, -63, -62, -60, + -58, -56, -55, -53, -51, -49, -47, -45, + -44, -42, -40, -38, -36, -34, -32, -30, + -28, -26, -24, -22, -20, -18, -16, -14, + -12, -10, -8, -6, -4, -2, 0, 1, + 3, 5, 7, 9, 11, 13, 15, 17, + 19, 21, 23, 25, 27, 29, 31, 33, + 35, 37, 39, 41, 43, 45, 46, 48, + 50, 52, 54, 55, 57, 59, 61, 62, + 64, 66, 67, 69, 71, 72, 74, 75, + 77, 78, 80, 81, 83, 84, 86, 87, + 88, 90, 91, 92, 93, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, + 106, 106, 
107, 108, 109, 109, 110, 111, + 111, 112, 112, 113, 113, 114, 114, 114, + 115, 115, 115, 115, 116, 116, 116, 116, + 116, 116, 116, 116, 116, 115, 115, 115, + 115, 114, 114, 114, 113, 113, 112, 112, + 111, 111, 110, 110, 109, 108, 108, 107, + 106, 105, 104, 103, 102, 101, 100, 99, + 98, 97, 96, 95, 94, 93, 91, 90, + 89, 88, 86, 85, 84, 82, 81, 79, + 78, 76, 75, 73, 71, 70, 68, 67, + 65, 63, 62, 60, 58, 56, 55, 53, + 51, 49, 47, 45, 44, 42, 40, 38, + 36, 34, 32, 30, 28, 26, 24, 22, + 20, 18, 16, 14, 12, 10, 8, 6, + 4, 2, 0, -1, -3, -5, -7, -9, -11 +}; + +static u16 i2c_ident[] = { + V4L2_IDENT_OV9650, + V4L2_IDENT_OV9655, + V4L2_IDENT_SOI968, + V4L2_IDENT_OV7660, + V4L2_IDENT_OV7670, + V4L2_IDENT_MT9V011, + V4L2_IDENT_MT9V111, + V4L2_IDENT_MT9V112, + V4L2_IDENT_MT9M001C12ST, + V4L2_IDENT_MT9M111, + V4L2_IDENT_HV7131R, +}; + +static u16 bridge_init[][2] = { + {0x1000, 0x78}, {0x1001, 0x40}, {0x1002, 0x1c}, + {0x1020, 0x80}, {0x1061, 0x01}, {0x1067, 0x40}, + {0x1068, 0x30}, {0x1069, 0x20}, {0x106a, 0x10}, + {0x106b, 0x08}, {0x1188, 0x87}, {0x11a1, 0x00}, + {0x11a2, 0x00}, {0x11a3, 0x6a}, {0x11a4, 0x50}, + {0x11ab, 0x00}, {0x11ac, 0x00}, {0x11ad, 0x50}, + {0x11ae, 0x3c}, {0x118a, 0x04}, {0x0395, 0x04}, + {0x11b8, 0x3a}, {0x118b, 0x0e}, {0x10f7, 0x05}, + {0x10f8, 0x14}, {0x10fa, 0xff}, {0x10f9, 0x00}, + {0x11ba, 0x0a}, {0x11a5, 0x2d}, {0x11a6, 0x2d}, + {0x11a7, 0x3a}, {0x11a8, 0x05}, {0x11a9, 0x04}, + {0x11aa, 0x3f}, {0x11af, 0x28}, {0x11b0, 0xd8}, + {0x11b1, 0x14}, {0x11b2, 0xec}, {0x11b3, 0x32}, + {0x11b4, 0xdd}, {0x11b5, 0x32}, {0x11b6, 0xdd}, + {0x10e0, 0x2c}, {0x11bc, 0x40}, {0x11bd, 0x01}, + {0x11be, 0xf0}, {0x11bf, 0x00}, {0x118c, 0x1f}, + {0x118d, 0x1f}, {0x118e, 0x1f}, {0x118f, 0x1f}, + {0x1180, 0x01}, {0x1181, 0x00}, {0x1182, 0x01}, + {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80} +}; + +/* Gain = (bit[3:0] / 16 + 1) * (bit[4] + 1) * (bit[5] + 1) * (bit[6] + 1) */ +static u8 ov_gain[] = { + 0x00 /* 1x */, 0x04 /* 1.25x */, 0x08 /* 1.5x */, 0x0c /* 1.75x */, + 0x10 /* 2x */, 0x12 /* 2.25x */, 0x14 /* 2.5x */, 0x16 /* 2.75x */, + 0x18 /* 3x */, 0x1a /* 3.25x */, 0x1c /* 3.5x */, 0x1e /* 3.75x */, + 0x30 /* 4x */, 0x31 /* 4.25x */, 0x32 /* 4.5x */, 0x33 /* 4.75x */, + 0x34 /* 5x */, 0x35 /* 5.25x */, 0x36 /* 5.5x */, 0x37 /* 5.75x */, + 0x38 /* 6x */, 0x39 /* 6.25x */, 0x3a /* 6.5x */, 0x3b /* 6.75x */, + 0x3c /* 7x */, 0x3d /* 7.25x */, 0x3e /* 7.5x */, 0x3f /* 7.75x */, + 0x70 /* 8x */ +}; + +/* Gain = (bit[8] + 1) * (bit[7] + 1) * (bit[6:0] * 0.03125) */ +static u16 micron1_gain[] = { + /* 1x 1.25x 1.5x 1.75x */ + 0x0020, 0x0028, 0x0030, 0x0038, + /* 2x 2.25x 2.5x 2.75x */ + 0x00a0, 0x00a4, 0x00a8, 0x00ac, + /* 3x 3.25x 3.5x 3.75x */ + 0x00b0, 0x00b4, 0x00b8, 0x00bc, + /* 4x 4.25x 4.5x 4.75x */ + 0x00c0, 0x00c4, 0x00c8, 0x00cc, + /* 5x 5.25x 5.5x 5.75x */ + 0x00d0, 0x00d4, 0x00d8, 0x00dc, + /* 6x 6.25x 6.5x 6.75x */ + 0x00e0, 0x00e4, 0x00e8, 0x00ec, + /* 7x 7.25x 7.5x 7.75x */ + 0x00f0, 0x00f4, 0x00f8, 0x00fc, + /* 8x */ + 0x01c0 +}; + +/* mt9m001 sensor uses a different gain formula then other micron sensors */ +/* Gain = (bit[6] + 1) * (bit[5-0] * 0.125) */ +static u16 micron2_gain[] = { + /* 1x 1.25x 1.5x 1.75x */ + 0x0008, 0x000a, 0x000c, 0x000e, + /* 2x 2.25x 2.5x 2.75x */ + 0x0010, 0x0012, 0x0014, 0x0016, + /* 3x 3.25x 3.5x 3.75x */ + 0x0018, 0x001a, 0x001c, 0x001e, + /* 4x 4.25x 4.5x 4.75x */ + 0x0020, 0x0051, 0x0052, 0x0053, + /* 5x 5.25x 5.5x 5.75x */ + 0x0054, 0x0055, 0x0056, 0x0057, + /* 6x 6.25x 6.5x 6.75x */ + 0x0058, 0x0059, 0x005a, 0x005b, + /* 7x 7.25x 7.5x 7.75x */ + 
0x005c, 0x005d, 0x005e, 0x005f, + /* 8x */ + 0x0060 +}; + +/* Gain = .5 + bit[7:0] / 16 */ +static u8 hv7131r_gain[] = { + 0x08 /* 1x */, 0x0c /* 1.25x */, 0x10 /* 1.5x */, 0x14 /* 1.75x */, + 0x18 /* 2x */, 0x1c /* 2.25x */, 0x20 /* 2.5x */, 0x24 /* 2.75x */, + 0x28 /* 3x */, 0x2c /* 3.25x */, 0x30 /* 3.5x */, 0x34 /* 3.75x */, + 0x38 /* 4x */, 0x3c /* 4.25x */, 0x40 /* 4.5x */, 0x44 /* 4.75x */, + 0x48 /* 5x */, 0x4c /* 5.25x */, 0x50 /* 5.5x */, 0x54 /* 5.75x */, + 0x58 /* 6x */, 0x5c /* 6.25x */, 0x60 /* 6.5x */, 0x64 /* 6.75x */, + 0x68 /* 7x */, 0x6c /* 7.25x */, 0x70 /* 7.5x */, 0x74 /* 7.75x */, + 0x78 /* 8x */ +}; + +static u8 soi968_init[][2] = { + {0x12, 0x80}, {0x0c, 0x00}, {0x0f, 0x1f}, + {0x11, 0x80}, {0x38, 0x52}, {0x1e, 0x00}, + {0x33, 0x08}, {0x35, 0x8c}, {0x36, 0x0c}, + {0x37, 0x04}, {0x45, 0x04}, {0x47, 0xff}, + {0x3e, 0x00}, {0x3f, 0x00}, {0x3b, 0x20}, + {0x3a, 0x96}, {0x3d, 0x0a}, {0x14, 0x8e}, + {0x13, 0x8a}, {0x12, 0x40}, {0x17, 0x13}, + {0x18, 0x63}, {0x19, 0x01}, {0x1a, 0x79}, + {0x32, 0x24}, {0x03, 0x00}, {0x11, 0x40}, + {0x2a, 0x10}, {0x2b, 0xe0}, {0x10, 0x32}, + {0x00, 0x00}, {0x01, 0x80}, {0x02, 0x80}, +}; + +static u8 ov7660_init[][2] = { + {0x0e, 0x80}, {0x0d, 0x08}, {0x0f, 0xc3}, + {0x04, 0xc3}, {0x10, 0x40}, {0x11, 0x40}, + {0x12, 0x05}, {0x13, 0xba}, {0x14, 0x2a}, + {0x37, 0x0f}, {0x38, 0x02}, {0x39, 0x43}, + {0x3a, 0x00}, {0x69, 0x90}, {0x2d, 0xf6}, + {0x2e, 0x0b}, {0x01, 0x78}, {0x02, 0x50}, +}; + +static u8 ov7670_init[][2] = { + {0x12, 0x80}, {0x11, 0x80}, {0x3a, 0x04}, {0x12, 0x01}, + {0x32, 0xb6}, {0x03, 0x0a}, {0x0c, 0x00}, {0x3e, 0x00}, + {0x70, 0x3a}, {0x71, 0x35}, {0x72, 0x11}, {0x73, 0xf0}, + {0xa2, 0x02}, {0x13, 0xe0}, {0x00, 0x00}, {0x10, 0x00}, + {0x0d, 0x40}, {0x14, 0x28}, {0xa5, 0x05}, {0xab, 0x07}, + {0x24, 0x95}, {0x25, 0x33}, {0x26, 0xe3}, {0x9f, 0x75}, + {0xa0, 0x65}, {0xa1, 0x0b}, {0xa6, 0xd8}, {0xa7, 0xd8}, + {0xa8, 0xf0}, {0xa9, 0x90}, {0xaa, 0x94}, {0x13, 0xe5}, + {0x0e, 0x61}, {0x0f, 0x4b}, {0x16, 0x02}, {0x1e, 0x27}, + {0x21, 0x02}, {0x22, 0x91}, {0x29, 0x07}, {0x33, 0x0b}, + {0x35, 0x0b}, {0x37, 0x1d}, {0x38, 0x71}, {0x39, 0x2a}, + {0x3c, 0x78}, {0x4d, 0x40}, {0x4e, 0x20}, {0x69, 0x00}, + {0x74, 0x19}, {0x8d, 0x4f}, {0x8e, 0x00}, {0x8f, 0x00}, + {0x90, 0x00}, {0x91, 0x00}, {0x96, 0x00}, {0x9a, 0x80}, + {0xb0, 0x84}, {0xb1, 0x0c}, {0xb2, 0x0e}, {0xb3, 0x82}, + {0xb8, 0x0a}, {0x43, 0x0a}, {0x44, 0xf0}, {0x45, 0x20}, + {0x46, 0x7d}, {0x47, 0x29}, {0x48, 0x4a}, {0x59, 0x8c}, + {0x5a, 0xa5}, {0x5b, 0xde}, {0x5c, 0x96}, {0x5d, 0x66}, + {0x5e, 0x10}, {0x6c, 0x0a}, {0x6d, 0x55}, {0x6e, 0x11}, + {0x6f, 0x9e}, {0x6a, 0x40}, {0x01, 0x40}, {0x02, 0x40}, + {0x13, 0xe7}, {0x4f, 0x6e}, {0x50, 0x70}, {0x51, 0x02}, + {0x52, 0x1d}, {0x53, 0x56}, {0x54, 0x73}, {0x55, 0x0a}, + {0x56, 0x55}, {0x57, 0x80}, {0x58, 0x9e}, {0x41, 0x08}, + {0x3f, 0x02}, {0x75, 0x03}, {0x76, 0x63}, {0x4c, 0x04}, + {0x77, 0x06}, {0x3d, 0x02}, {0x4b, 0x09}, {0xc9, 0x30}, + {0x41, 0x08}, {0x56, 0x48}, {0x34, 0x11}, {0xa4, 0x88}, + {0x96, 0x00}, {0x97, 0x30}, {0x98, 0x20}, {0x99, 0x30}, + {0x9a, 0x84}, {0x9b, 0x29}, {0x9c, 0x03}, {0x9d, 0x99}, + {0x9e, 0x7f}, {0x78, 0x04}, {0x79, 0x01}, {0xc8, 0xf0}, + {0x79, 0x0f}, {0xc8, 0x00}, {0x79, 0x10}, {0xc8, 0x7e}, + {0x79, 0x0a}, {0xc8, 0x80}, {0x79, 0x0b}, {0xc8, 0x01}, + {0x79, 0x0c}, {0xc8, 0x0f}, {0x79, 0x0d}, {0xc8, 0x20}, + {0x79, 0x09}, {0xc8, 0x80}, {0x79, 0x02}, {0xc8, 0xc0}, + {0x79, 0x03}, {0xc8, 0x40}, {0x79, 0x05}, {0xc8, 0x30}, + {0x79, 0x26}, {0x62, 0x20}, {0x63, 0x00}, {0x64, 0x06}, + {0x65, 0x00}, {0x66, 0x05}, 
{0x94, 0x05}, {0x95, 0x0a}, + {0x17, 0x13}, {0x18, 0x01}, {0x19, 0x02}, {0x1a, 0x7a}, + {0x46, 0x59}, {0x47, 0x30}, {0x58, 0x9a}, {0x59, 0x84}, + {0x5a, 0x91}, {0x5b, 0x57}, {0x5c, 0x75}, {0x5d, 0x6d}, + {0x5e, 0x13}, {0x64, 0x07}, {0x94, 0x07}, {0x95, 0x0d}, + {0xa6, 0xdf}, {0xa7, 0xdf}, {0x48, 0x4d}, {0x51, 0x00}, + {0x6b, 0x0a}, {0x11, 0x80}, {0x2a, 0x00}, {0x2b, 0x00}, + {0x92, 0x00}, {0x93, 0x00}, {0x55, 0x0a}, {0x56, 0x60}, + {0x4f, 0x6e}, {0x50, 0x70}, {0x51, 0x00}, {0x52, 0x1d}, + {0x53, 0x56}, {0x54, 0x73}, {0x58, 0x9a}, {0x4f, 0x6e}, + {0x50, 0x70}, {0x51, 0x00}, {0x52, 0x1d}, {0x53, 0x56}, + {0x54, 0x73}, {0x58, 0x9a}, {0x3f, 0x01}, {0x7b, 0x03}, + {0x7c, 0x09}, {0x7d, 0x16}, {0x7e, 0x38}, {0x7f, 0x47}, + {0x80, 0x53}, {0x81, 0x5e}, {0x82, 0x6a}, {0x83, 0x74}, + {0x84, 0x80}, {0x85, 0x8c}, {0x86, 0x9b}, {0x87, 0xb2}, + {0x88, 0xcc}, {0x89, 0xe5}, {0x7a, 0x24}, {0x3b, 0x00}, + {0x9f, 0x76}, {0xa0, 0x65}, {0x13, 0xe2}, {0x6b, 0x0a}, + {0x11, 0x80}, {0x2a, 0x00}, {0x2b, 0x00}, {0x92, 0x00}, + {0x93, 0x00}, +}; + +static u8 ov9650_init[][2] = { + {0x12, 0x80}, {0x00, 0x00}, {0x01, 0x78}, + {0x02, 0x78}, {0x03, 0x36}, {0x04, 0x03}, + {0x05, 0x00}, {0x06, 0x00}, {0x08, 0x00}, + {0x09, 0x01}, {0x0c, 0x00}, {0x0d, 0x00}, + {0x0e, 0xa0}, {0x0f, 0x52}, {0x10, 0x7c}, + {0x11, 0x80}, {0x12, 0x45}, {0x13, 0xc2}, + {0x14, 0x2e}, {0x15, 0x00}, {0x16, 0x07}, + {0x17, 0x24}, {0x18, 0xc5}, {0x19, 0x00}, + {0x1a, 0x3c}, {0x1b, 0x00}, {0x1e, 0x04}, + {0x1f, 0x00}, {0x24, 0x78}, {0x25, 0x68}, + {0x26, 0xd4}, {0x27, 0x80}, {0x28, 0x80}, + {0x29, 0x30}, {0x2a, 0x00}, {0x2b, 0x00}, + {0x2c, 0x80}, {0x2d, 0x00}, {0x2e, 0x00}, + {0x2f, 0x00}, {0x30, 0x08}, {0x31, 0x30}, + {0x32, 0x84}, {0x33, 0xe2}, {0x34, 0xbf}, + {0x35, 0x81}, {0x36, 0xf9}, {0x37, 0x00}, + {0x38, 0x93}, {0x39, 0x50}, {0x3a, 0x01}, + {0x3b, 0x01}, {0x3c, 0x73}, {0x3d, 0x19}, + {0x3e, 0x0b}, {0x3f, 0x80}, {0x40, 0xc1}, + {0x41, 0x00}, {0x42, 0x08}, {0x67, 0x80}, + {0x68, 0x80}, {0x69, 0x40}, {0x6a, 0x00}, + {0x6b, 0x0a}, {0x8b, 0x06}, {0x8c, 0x20}, + {0x8d, 0x00}, {0x8e, 0x00}, {0x8f, 0xdf}, + {0x92, 0x00}, {0x93, 0x00}, {0x94, 0x88}, + {0x95, 0x88}, {0x96, 0x04}, {0xa1, 0x00}, + {0xa5, 0x80}, {0xa8, 0x80}, {0xa9, 0xb8}, + {0xaa, 0x92}, {0xab, 0x0a}, +}; + +static u8 ov9655_init[][2] = { + {0x12, 0x80}, {0x12, 0x01}, {0x0d, 0x00}, {0x0e, 0x61}, + {0x11, 0x80}, {0x13, 0xba}, {0x14, 0x2e}, {0x16, 0x24}, + {0x1e, 0x04}, {0x1e, 0x04}, {0x1e, 0x04}, {0x27, 0x08}, + {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x32, 0xbf}, + {0x34, 0x3d}, {0x35, 0x00}, {0x36, 0xf8}, {0x38, 0x12}, + {0x39, 0x57}, {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c}, + {0x3d, 0x19}, {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40}, + {0x42, 0x80}, {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a}, + {0x48, 0x3c}, {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc}, + {0x4d, 0xdc}, {0x4e, 0xdc}, {0x69, 0x02}, {0x6c, 0x04}, + {0x6f, 0x9e}, {0x70, 0x05}, {0x71, 0x78}, {0x77, 0x02}, + {0x8a, 0x23}, {0x8c, 0x0d}, {0x90, 0x7e}, {0x91, 0x7c}, + {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68}, {0xa6, 0x60}, + {0xa8, 0xc1}, {0xa9, 0xfa}, {0xaa, 0x92}, {0xab, 0x04}, + {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80}, {0xaf, 0x80}, + {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00}, {0xb6, 0xaf}, + {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44}, {0xbe, 0x3b}, + {0xbf, 0x3a}, {0xc0, 0xe2}, {0xc1, 0xc8}, {0xc2, 0x01}, + {0xc4, 0x00}, {0xc6, 0x85}, {0xc7, 0x81}, {0xc9, 0xe0}, + {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x12, 0x61}, + {0x36, 0xfa}, {0x8c, 0x8d}, {0xc0, 0xaa}, {0x69, 0x0a}, + {0x03, 0x12}, {0x17, 0x14}, {0x18, 0x00}, {0x19, 
0x01}, + {0x1a, 0x3d}, {0x32, 0xbf}, {0x11, 0x80}, {0x2a, 0x10}, + {0x2b, 0x0a}, {0x92, 0x00}, {0x93, 0x00}, {0x1e, 0x04}, + {0x1e, 0x04}, {0x10, 0x7c}, {0x04, 0x03}, {0xa1, 0x00}, + {0x2d, 0x00}, {0x2e, 0x00}, {0x00, 0x00}, {0x01, 0x80}, + {0x02, 0x80}, {0x12, 0x61}, {0x36, 0xfa}, {0x8c, 0x8d}, + {0xc0, 0xaa}, {0x69, 0x0a}, {0x03, 0x12}, {0x17, 0x14}, + {0x18, 0x00}, {0x19, 0x01}, {0x1a, 0x3d}, {0x32, 0xbf}, + {0x11, 0x80}, {0x2a, 0x10}, {0x2b, 0x0a}, {0x92, 0x00}, + {0x93, 0x00}, {0x04, 0x01}, {0x10, 0x1f}, {0xa1, 0x00}, + {0x00, 0x0a}, {0xa1, 0x00}, {0x10, 0x5d}, {0x04, 0x03}, + {0x00, 0x01}, {0xa1, 0x00}, {0x10, 0x7c}, {0x04, 0x03}, + {0x00, 0x03}, {0x00, 0x0a}, {0x00, 0x10}, {0x00, 0x13}, +}; + +static u16 mt9v112_init[][2] = { + {0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0020}, + {0x34, 0xc019}, {0x0a, 0x0011}, {0x0b, 0x000b}, + {0x20, 0x0703}, {0x35, 0x2022}, {0xf0, 0x0001}, + {0x05, 0x0000}, {0x06, 0x340c}, {0x3b, 0x042a}, + {0x3c, 0x0400}, {0xf0, 0x0002}, {0x2e, 0x0c58}, + {0x5b, 0x0001}, {0xc8, 0x9f0b}, {0xf0, 0x0001}, + {0x9b, 0x5300}, {0xf0, 0x0000}, {0x2b, 0x0020}, + {0x2c, 0x002a}, {0x2d, 0x0032}, {0x2e, 0x0020}, + {0x09, 0x01dc}, {0x01, 0x000c}, {0x02, 0x0020}, + {0x03, 0x01e0}, {0x04, 0x0280}, {0x06, 0x000c}, + {0x05, 0x0098}, {0x20, 0x0703}, {0x09, 0x01f2}, + {0x2b, 0x00a0}, {0x2c, 0x00a0}, {0x2d, 0x00a0}, + {0x2e, 0x00a0}, {0x01, 0x000c}, {0x02, 0x0020}, + {0x03, 0x01e0}, {0x04, 0x0280}, {0x06, 0x000c}, + {0x05, 0x0098}, {0x09, 0x01c1}, {0x2b, 0x00ae}, + {0x2c, 0x00ae}, {0x2d, 0x00ae}, {0x2e, 0x00ae}, +}; + +static u16 mt9v111_init[][2] = { + {0x01, 0x0004}, {0x0d, 0x0001}, {0x0d, 0x0000}, + {0x01, 0x0001}, {0x02, 0x0016}, {0x03, 0x01e1}, + {0x04, 0x0281}, {0x05, 0x0004}, {0x07, 0x3002}, + {0x21, 0x0000}, {0x25, 0x4024}, {0x26, 0xff03}, + {0x27, 0xff10}, {0x2b, 0x7828}, {0x2c, 0xb43c}, + {0x2d, 0xf0a0}, {0x2e, 0x0c64}, {0x2f, 0x0064}, + {0x67, 0x4010}, {0x06, 0x301e}, {0x08, 0x0480}, + {0x01, 0x0004}, {0x02, 0x0016}, {0x03, 0x01e6}, + {0x04, 0x0286}, {0x05, 0x0004}, {0x06, 0x0000}, + {0x07, 0x3002}, {0x08, 0x0008}, {0x0c, 0x0000}, + {0x0d, 0x0000}, {0x0e, 0x0000}, {0x0f, 0x0000}, + {0x10, 0x0000}, {0x11, 0x0000}, {0x12, 0x00b0}, + {0x13, 0x007c}, {0x14, 0x0000}, {0x15, 0x0000}, + {0x16, 0x0000}, {0x17, 0x0000}, {0x18, 0x0000}, + {0x19, 0x0000}, {0x1a, 0x0000}, {0x1b, 0x0000}, + {0x1c, 0x0000}, {0x1d, 0x0000}, {0x30, 0x0000}, + {0x30, 0x0005}, {0x31, 0x0000}, {0x02, 0x0016}, + {0x03, 0x01e1}, {0x04, 0x0281}, {0x05, 0x0004}, + {0x06, 0x0000}, {0x07, 0x3002}, {0x06, 0x002d}, + {0x05, 0x0004}, {0x09, 0x0064}, {0x2b, 0x00a0}, + {0x2c, 0x00a0}, {0x2d, 0x00a0}, {0x2e, 0x00a0}, + {0x02, 0x0016}, {0x03, 0x01e1}, {0x04, 0x0281}, + {0x05, 0x0004}, {0x06, 0x002d}, {0x07, 0x3002}, + {0x0e, 0x0008}, {0x06, 0x002d}, {0x05, 0x0004}, +}; + +static u16 mt9v011_init[][2] = { + {0x07, 0x0002}, {0x0d, 0x0001}, {0x0d, 0x0000}, + {0x01, 0x0008}, {0x02, 0x0016}, {0x03, 0x01e1}, + {0x04, 0x0281}, {0x05, 0x0083}, {0x06, 0x0006}, + {0x0d, 0x0002}, {0x0a, 0x0000}, {0x0b, 0x0000}, + {0x0c, 0x0000}, {0x0d, 0x0000}, {0x0e, 0x0000}, + {0x0f, 0x0000}, {0x10, 0x0000}, {0x11, 0x0000}, + {0x12, 0x0000}, {0x13, 0x0000}, {0x14, 0x0000}, + {0x15, 0x0000}, {0x16, 0x0000}, {0x17, 0x0000}, + {0x18, 0x0000}, {0x19, 0x0000}, {0x1a, 0x0000}, + {0x1b, 0x0000}, {0x1c, 0x0000}, {0x1d, 0x0000}, + {0x32, 0x0000}, {0x20, 0x1101}, {0x21, 0x0000}, + {0x22, 0x0000}, {0x23, 0x0000}, {0x24, 0x0000}, + {0x25, 0x0000}, {0x26, 0x0000}, {0x27, 0x0024}, + {0x2f, 0xf7b0}, {0x30, 0x0005}, {0x31, 0x0000}, + {0x32, 0x0000}, {0x33, 
0x0000}, {0x34, 0x0100}, + {0x3d, 0x068f}, {0x40, 0x01e0}, {0x41, 0x00d1}, + {0x44, 0x0082}, {0x5a, 0x0000}, {0x5b, 0x0000}, + {0x5c, 0x0000}, {0x5d, 0x0000}, {0x5e, 0x0000}, + {0x5f, 0xa31d}, {0x62, 0x0611}, {0x0a, 0x0000}, + {0x06, 0x0029}, {0x05, 0x0009}, {0x20, 0x1101}, + {0x20, 0x1101}, {0x09, 0x0064}, {0x07, 0x0003}, + {0x2b, 0x0033}, {0x2c, 0x00a0}, {0x2d, 0x00a0}, + {0x2e, 0x0033}, {0x07, 0x0002}, {0x06, 0x0000}, + {0x06, 0x0029}, {0x05, 0x0009}, +}; + +static u16 mt9m001_init[][2] = { + {0x0d, 0x0001}, {0x0d, 0x0000}, {0x01, 0x000e}, + {0x02, 0x0014}, {0x03, 0x03c1}, {0x04, 0x0501}, + {0x05, 0x0083}, {0x06, 0x0006}, {0x0d, 0x0002}, + {0x0a, 0x0000}, {0x0c, 0x0000}, {0x11, 0x0000}, + {0x1e, 0x8000}, {0x5f, 0x8904}, {0x60, 0x0000}, + {0x61, 0x0000}, {0x62, 0x0498}, {0x63, 0x0000}, + {0x64, 0x0000}, {0x20, 0x111d}, {0x06, 0x00f2}, + {0x05, 0x0013}, {0x09, 0x10f2}, {0x07, 0x0003}, + {0x2b, 0x002a}, {0x2d, 0x002a}, {0x2c, 0x002a}, + {0x2e, 0x0029}, {0x07, 0x0002}, +}; + +static u16 mt9m111_init[][2] = { + {0xf0, 0x0000}, {0x0d, 0x0008}, {0x0d, 0x0009}, + {0x0d, 0x0008}, {0xf0, 0x0001}, {0x3a, 0x4300}, + {0x9b, 0x4300}, {0xa1, 0x0280}, {0xa4, 0x0200}, + {0x06, 0x308e}, {0xf0, 0x0000}, +}; + +static u8 hv7131r_init[][2] = { + {0x02, 0x08}, {0x02, 0x00}, {0x01, 0x08}, + {0x02, 0x00}, {0x20, 0x00}, {0x21, 0xd0}, + {0x22, 0x00}, {0x23, 0x09}, {0x01, 0x08}, + {0x01, 0x08}, {0x01, 0x08}, {0x25, 0x07}, + {0x26, 0xc3}, {0x27, 0x50}, {0x30, 0x62}, + {0x31, 0x10}, {0x32, 0x06}, {0x33, 0x10}, + {0x20, 0x00}, {0x21, 0xd0}, {0x22, 0x00}, + {0x23, 0x09}, {0x01, 0x08}, +}; + +int reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length) +{ + struct usb_device *dev = gspca_dev->dev; + int result; + result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), + 0x00, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + reg, + 0x00, + gspca_dev->usb_buf, + length, + 500); + if (unlikely(result < 0 || result != length)) { + err("Read register failed 0x%02X", reg); + return -EIO; + } + return 0; +} + +int reg_w(struct gspca_dev *gspca_dev, u16 reg, const u8 *buffer, int length) +{ + struct usb_device *dev = gspca_dev->dev; + int result; + memcpy(gspca_dev->usb_buf, buffer, length); + result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + 0x08, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, + reg, + 0x00, + gspca_dev->usb_buf, + length, + 500); + if (unlikely(result < 0 || result != length)) { + err("Write register failed index 0x%02X", reg); + return -EIO; + } + return 0; +} + +int reg_w1(struct gspca_dev *gspca_dev, u16 reg, const u8 value) +{ + u8 data[1] = {value}; + return reg_w(gspca_dev, reg, data, 1); +} + +int i2c_w(struct gspca_dev *gspca_dev, const u8 *buffer) +{ + int i; + reg_w(gspca_dev, 0x10c0, buffer, 8); + for (i = 0; i < 5; i++) { + reg_r(gspca_dev, 0x10c0, 1); + if (gspca_dev->usb_buf[0] & 0x04) { + if (gspca_dev->usb_buf[0] & 0x08) + return -1; + return 0; + } + msleep(1); + } + return -1; +} + +int i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + u8 row[8]; + + /* + * from the point of view of the bridge, the length + * includes the address + */ + row[0] = 0x81 | (2 << 4); + row[1] = sd->i2c_addr; + row[2] = reg; + row[3] = val; + row[4] = 0x00; + row[5] = 0x00; + row[6] = 0x00; + row[7] = 0x10; + + return i2c_w(gspca_dev, row); +} + +int i2c_w2(struct gspca_dev *gspca_dev, u8 reg, u16 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 row[8]; + + /* + * from the point of view of the bridge, the length + * includes the 
address + */ + row[0] = 0x81 | (3 << 4); + row[1] = sd->i2c_addr; + row[2] = reg; + row[3] = (val >> 8) & 0xff; + row[4] = val & 0xff; + row[5] = 0x00; + row[6] = 0x00; + row[7] = 0x10; + + return i2c_w(gspca_dev, row); +} + +int i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 row[8]; + + row[0] = 0x81 | 0x10; + row[1] = sd->i2c_addr; + row[2] = reg; + row[3] = 0; + row[4] = 0; + row[5] = 0; + row[6] = 0; + row[7] = 0x10; + reg_w(gspca_dev, 0x10c0, row, 8); + msleep(1); + row[0] = 0x81 | (2 << 4) | 0x02; + row[2] = 0; + reg_w(gspca_dev, 0x10c0, row, 8); + msleep(1); + reg_r(gspca_dev, 0x10c2, 5); + *val = gspca_dev->usb_buf[3]; + return 0; +} + +int i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 row[8]; + + row[0] = 0x81 | 0x10; + row[1] = sd->i2c_addr; + row[2] = reg; + row[3] = 0; + row[4] = 0; + row[5] = 0; + row[6] = 0; + row[7] = 0x10; + reg_w(gspca_dev, 0x10c0, row, 8); + msleep(1); + row[0] = 0x81 | (3 << 4) | 0x02; + row[2] = 0; + reg_w(gspca_dev, 0x10c0, row, 8); + msleep(1); + reg_r(gspca_dev, 0x10c2, 5); + *val = (gspca_dev->usb_buf[2] << 8) | gspca_dev->usb_buf[3]; + return 0; +} + +static int ov9650_init_sensor(struct gspca_dev *gspca_dev) +{ + int i; + struct sd *sd = (struct sd *) gspca_dev; + + for (i = 0; i < ARRAY_SIZE(ov9650_init); i++) { + if (i2c_w1(gspca_dev, ov9650_init[i][0], + ov9650_init[i][1]) < 0) { + err("OV9650 sensor initialization failed"); + return -ENODEV; + } + } + sd->hstart = 1; + sd->vstart = 7; + return 0; +} + +static int ov9655_init_sensor(struct gspca_dev *gspca_dev) +{ + int i; + struct sd *sd = (struct sd *) gspca_dev; + + for (i = 0; i < ARRAY_SIZE(ov9655_init); i++) { + if (i2c_w1(gspca_dev, ov9655_init[i][0], + ov9655_init[i][1]) < 0) { + err("OV9655 sensor initialization failed"); + return -ENODEV; + } + } + /* disable hflip and vflip */ + gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); + sd->hstart = 0; + sd->vstart = 7; + return 0; +} + +static int soi968_init_sensor(struct gspca_dev *gspca_dev) +{ + int i; + struct sd *sd = (struct sd *) gspca_dev; + + for (i = 0; i < ARRAY_SIZE(soi968_init); i++) { + if (i2c_w1(gspca_dev, soi968_init[i][0], + soi968_init[i][1]) < 0) { + err("SOI968 sensor initialization failed"); + return -ENODEV; + } + } + /* disable hflip and vflip */ + gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); + sd->hstart = 60; + sd->vstart = 11; + return 0; +} + +static int ov7660_init_sensor(struct gspca_dev *gspca_dev) +{ + int i; + struct sd *sd = (struct sd *) gspca_dev; + + for (i = 0; i < ARRAY_SIZE(ov7660_init); i++) { + if (i2c_w1(gspca_dev, ov7660_init[i][0], + ov7660_init[i][1]) < 0) { + err("OV7660 sensor initialization failed"); + return -ENODEV; + } + } + /* disable hflip and vflip */ + gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); + sd->hstart = 1; + sd->vstart = 1; + return 0; +} + +static int ov7670_init_sensor(struct gspca_dev *gspca_dev) +{ + int i; + struct sd *sd = (struct sd *) gspca_dev; + + for (i = 0; i < ARRAY_SIZE(ov7670_init); i++) { + if (i2c_w1(gspca_dev, ov7670_init[i][0], + ov7670_init[i][1]) < 0) { + err("OV7670 sensor initialization failed"); + return -ENODEV; + } + } + /* disable hflip and vflip */ + gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); + sd->hstart = 0; + sd->vstart = 1; + return 0; +} + +static int mt9v_init_sensor(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + int i; + u16 value; + int ret; + + 
sd->i2c_addr = 0x5d; + ret = i2c_r2(gspca_dev, 0xff, &value); + if ((ret == 0) && (value == 0x8243)) { + for (i = 0; i < ARRAY_SIZE(mt9v011_init); i++) { + if (i2c_w2(gspca_dev, mt9v011_init[i][0], + mt9v011_init[i][1]) < 0) { + err("MT9V011 sensor initialization failed"); + return -ENODEV; + } + } + sd->hstart = 2; + sd->vstart = 2; + sd->sensor = SENSOR_MT9V011; + info("MT9V011 sensor detected"); + return 0; + } + + sd->i2c_addr = 0x5c; + i2c_w2(gspca_dev, 0x01, 0x0004); + ret = i2c_r2(gspca_dev, 0xff, &value); + if ((ret == 0) && (value == 0x823a)) { + for (i = 0; i < ARRAY_SIZE(mt9v111_init); i++) { + if (i2c_w2(gspca_dev, mt9v111_init[i][0], + mt9v111_init[i][1]) < 0) { + err("MT9V111 sensor initialization failed"); + return -ENODEV; + } + } + sd->hstart = 2; + sd->vstart = 2; + sd->sensor = SENSOR_MT9V111; + info("MT9V111 sensor detected"); + return 0; + } + + sd->i2c_addr = 0x5d; + ret = i2c_w2(gspca_dev, 0xf0, 0x0000); + if (ret < 0) { + sd->i2c_addr = 0x48; + i2c_w2(gspca_dev, 0xf0, 0x0000); + } + ret = i2c_r2(gspca_dev, 0x00, &value); + if ((ret == 0) && (value == 0x1229)) { + for (i = 0; i < ARRAY_SIZE(mt9v112_init); i++) { + if (i2c_w2(gspca_dev, mt9v112_init[i][0], + mt9v112_init[i][1]) < 0) { + err("MT9V112 sensor initialization failed"); + return -ENODEV; + } + } + sd->hstart = 6; + sd->vstart = 2; + sd->sensor = SENSOR_MT9V112; + info("MT9V112 sensor detected"); + return 0; + } + + return -ENODEV; +} + +static int mt9m111_init_sensor(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + int i; + for (i = 0; i < ARRAY_SIZE(mt9m111_init); i++) { + if (i2c_w2(gspca_dev, mt9m111_init[i][0], + mt9m111_init[i][1]) < 0) { + err("MT9M111 sensor initialization failed"); + return -ENODEV; + } + } + sd->hstart = 0; + sd->vstart = 2; + return 0; +} + +static int mt9m001_init_sensor(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + int i; + for (i = 0; i < ARRAY_SIZE(mt9m001_init); i++) { + if (i2c_w2(gspca_dev, mt9m001_init[i][0], + mt9m001_init[i][1]) < 0) { + err("MT9M001 sensor initialization failed"); + return -ENODEV; + } + } + /* disable hflip and vflip */ + gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX); + sd->hstart = 2; + sd->vstart = 2; + return 0; +} + +static int hv7131r_init_sensor(struct gspca_dev *gspca_dev) +{ + int i; + struct sd *sd = (struct sd *) gspca_dev; + + for (i = 0; i < ARRAY_SIZE(hv7131r_init); i++) { + if (i2c_w1(gspca_dev, hv7131r_init[i][0], + hv7131r_init[i][1]) < 0) { + err("HV7131R Sensor initialization failed"); + return -ENODEV; + } + } + sd->hstart = 0; + sd->vstart = 1; + return 0; +} + +#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV +static int input_kthread(void *data) +{ + struct gspca_dev *gspca_dev = (struct gspca_dev *)data; + struct sd *sd = (struct sd *) gspca_dev; + + DECLARE_WAIT_QUEUE_HEAD(wait); + set_freezable(); + for (;;) { + if (kthread_should_stop()) + break; + + if (reg_r(gspca_dev, 0x1005, 1) < 0) + continue; + + input_report_key(sd->input_dev, + KEY_CAMERA, + gspca_dev->usb_buf[0] & sd->input_gpio); + input_sync(sd->input_dev); + + wait_event_freezable_timeout(wait, + kthread_should_stop(), + msecs_to_jiffies(100)); + } + return 0; +} + + +static int sn9c20x_input_init(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + if (sd->input_gpio == 0) + return 0; + + sd->input_dev = input_allocate_device(); + if (!sd->input_dev) + return -ENOMEM; + + sd->input_dev->name = "SN9C20X Webcam"; + + sd->input_dev->phys = kasprintf(GFP_KERNEL, "usb-%s-%s", + 
gspca_dev->dev->bus->bus_name, + gspca_dev->dev->devpath); + + if (!sd->input_dev->phys) + return -ENOMEM; + + usb_to_input_id(gspca_dev->dev, &sd->input_dev->id); + sd->input_dev->dev.parent = &gspca_dev->dev->dev; + + set_bit(EV_KEY, sd->input_dev->evbit); + set_bit(KEY_CAMERA, sd->input_dev->keybit); + + if (input_register_device(sd->input_dev)) + return -EINVAL; + + sd->input_task = kthread_run(input_kthread, gspca_dev, "sn9c20x/%d", + gspca_dev->vdev.minor); + + if (IS_ERR(sd->input_task)) + return -EINVAL; + + return 0; +} + +static void sn9c20x_input_cleanup(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + if (sd->input_task != NULL && !IS_ERR(sd->input_task)) + kthread_stop(sd->input_task); + + if (sd->input_dev != NULL) { + input_unregister_device(sd->input_dev); + kfree(sd->input_dev->phys); + input_free_device(sd->input_dev); + sd->input_dev = NULL; + } +} +#endif + +static int set_cmatrix(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + s32 hue_coord, hue_index = 180 + sd->hue; + u8 cmatrix[21]; + memset(cmatrix, 0, 21); + + cmatrix[2] = (sd->contrast * 0x25 / 0x100) + 0x26; + cmatrix[0] = 0x13 + (cmatrix[2] - 0x26) * 0x13 / 0x25; + cmatrix[4] = 0x07 + (cmatrix[2] - 0x26) * 0x07 / 0x25; + cmatrix[18] = sd->brightness - 0x80; + + hue_coord = (hsv_red_x[hue_index] * sd->saturation) >> 8; + cmatrix[6] = (unsigned char)(hue_coord & 0xff); + cmatrix[7] = (unsigned char)((hue_coord >> 8) & 0x0f); + + hue_coord = (hsv_red_y[hue_index] * sd->saturation) >> 8; + cmatrix[8] = (unsigned char)(hue_coord & 0xff); + cmatrix[9] = (unsigned char)((hue_coord >> 8) & 0x0f); + + hue_coord = (hsv_green_x[hue_index] * sd->saturation) >> 8; + cmatrix[10] = (unsigned char)(hue_coord & 0xff); + cmatrix[11] = (unsigned char)((hue_coord >> 8) & 0x0f); + + hue_coord = (hsv_green_y[hue_index] * sd->saturation) >> 8; + cmatrix[12] = (unsigned char)(hue_coord & 0xff); + cmatrix[13] = (unsigned char)((hue_coord >> 8) & 0x0f); + + hue_coord = (hsv_blue_x[hue_index] * sd->saturation) >> 8; + cmatrix[14] = (unsigned char)(hue_coord & 0xff); + cmatrix[15] = (unsigned char)((hue_coord >> 8) & 0x0f); + + hue_coord = (hsv_blue_y[hue_index] * sd->saturation) >> 8; + cmatrix[16] = (unsigned char)(hue_coord & 0xff); + cmatrix[17] = (unsigned char)((hue_coord >> 8) & 0x0f); + + return reg_w(gspca_dev, 0x10e1, cmatrix, 21); +} + +static int set_gamma(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 gamma[17]; + u8 gval = sd->gamma * 0xb8 / 0x100; + + + gamma[0] = 0x0a; + gamma[1] = 0x13 + (gval * (0xcb - 0x13) / 0xb8); + gamma[2] = 0x25 + (gval * (0xee - 0x25) / 0xb8); + gamma[3] = 0x37 + (gval * (0xfa - 0x37) / 0xb8); + gamma[4] = 0x45 + (gval * (0xfc - 0x45) / 0xb8); + gamma[5] = 0x55 + (gval * (0xfb - 0x55) / 0xb8); + gamma[6] = 0x65 + (gval * (0xfc - 0x65) / 0xb8); + gamma[7] = 0x74 + (gval * (0xfd - 0x74) / 0xb8); + gamma[8] = 0x83 + (gval * (0xfe - 0x83) / 0xb8); + gamma[9] = 0x92 + (gval * (0xfc - 0x92) / 0xb8); + gamma[10] = 0xa1 + (gval * (0xfc - 0xa1) / 0xb8); + gamma[11] = 0xb0 + (gval * (0xfc - 0xb0) / 0xb8); + gamma[12] = 0xbf + (gval * (0xfb - 0xbf) / 0xb8); + gamma[13] = 0xce + (gval * (0xfb - 0xce) / 0xb8); + gamma[14] = 0xdf + (gval * (0xfd - 0xdf) / 0xb8); + gamma[15] = 0xea + (gval * (0xf9 - 0xea) / 0xb8); + gamma[16] = 0xf5; + + return reg_w(gspca_dev, 0x1190, gamma, 17); +} + +static int set_redblue(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + reg_w1(gspca_dev, 0x118c, sd->red); + 
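+	/* 0x118f holds the blue balance value, paired with the red balance
+	 * register 0x118c written above; both default to 0x1f in bridge_init. */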
reg_w1(gspca_dev, 0x118f, sd->blue); + return 0; +} + +static int set_hvflip(struct gspca_dev *gspca_dev) +{ + u8 value, tslb; + u16 value2; + struct sd *sd = (struct sd *) gspca_dev; + switch (sd->sensor) { + case SENSOR_OV9650: + i2c_r1(gspca_dev, 0x1e, &value); + value &= ~0x30; + tslb = 0x01; + if (sd->hflip) + value |= 0x20; + if (sd->vflip) { + value |= 0x10; + tslb = 0x49; + } + i2c_w1(gspca_dev, 0x1e, value); + i2c_w1(gspca_dev, 0x3a, tslb); + break; + case SENSOR_MT9V111: + case SENSOR_MT9V011: + i2c_r2(gspca_dev, 0x20, &value2); + value2 &= ~0xc0a0; + if (sd->hflip) + value2 |= 0x8080; + if (sd->vflip) + value2 |= 0x4020; + i2c_w2(gspca_dev, 0x20, value2); + break; + case SENSOR_MT9M111: + case SENSOR_MT9V112: + i2c_r2(gspca_dev, 0x20, &value2); + value2 &= ~0x0003; + if (sd->hflip) + value2 |= 0x0002; + if (sd->vflip) + value2 |= 0x0001; + i2c_w2(gspca_dev, 0x20, value2); + break; + case SENSOR_HV7131R: + i2c_r1(gspca_dev, 0x01, &value); + value &= ~0x03; + if (sd->vflip) + value |= 0x01; + if (sd->hflip) + value |= 0x02; + i2c_w1(gspca_dev, 0x01, value); + break; + } + return 0; +} + +static int set_exposure(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 exp[8] = {0x81, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e}; + switch (sd->sensor) { + case SENSOR_OV7660: + case SENSOR_OV7670: + case SENSOR_SOI968: + case SENSOR_OV9655: + case SENSOR_OV9650: + exp[0] |= (3 << 4); + exp[2] = 0x2d; + exp[3] = sd->exposure & 0xff; + exp[4] = sd->exposure >> 8; + break; + case SENSOR_MT9M001: + case SENSOR_MT9M111: + case SENSOR_MT9V112: + case SENSOR_MT9V111: + case SENSOR_MT9V011: + exp[0] |= (3 << 4); + exp[2] = 0x09; + exp[3] = sd->exposure >> 8; + exp[4] = sd->exposure & 0xff; + break; + case SENSOR_HV7131R: + exp[0] |= (4 << 4); + exp[2] = 0x25; + exp[3] = ((sd->exposure * 0xffffff) / 0xffff) >> 16; + exp[4] = ((sd->exposure * 0xffffff) / 0xffff) >> 8; + exp[5] = ((sd->exposure * 0xffffff) / 0xffff) & 0xff; + break; + } + i2c_w(gspca_dev, exp); + return 0; +} + +static int set_gain(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 gain[8] = {0x81, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d}; + switch (sd->sensor) { + case SENSOR_OV7660: + case SENSOR_OV7670: + case SENSOR_SOI968: + case SENSOR_OV9655: + case SENSOR_OV9650: + gain[0] |= (2 << 4); + gain[3] = ov_gain[sd->gain]; + break; + case SENSOR_MT9V011: + case SENSOR_MT9V111: + gain[0] |= (3 << 4); + gain[2] = 0x35; + gain[3] = micron1_gain[sd->gain] >> 8; + gain[4] = micron1_gain[sd->gain] & 0xff; + break; + case SENSOR_MT9V112: + case SENSOR_MT9M111: + gain[0] |= (3 << 4); + gain[2] = 0x2f; + gain[3] = micron1_gain[sd->gain] >> 8; + gain[4] = micron1_gain[sd->gain] & 0xff; + break; + case SENSOR_MT9M001: + gain[0] |= (3 << 4); + gain[2] = 0x2f; + gain[3] = micron2_gain[sd->gain] >> 8; + gain[4] = micron2_gain[sd->gain] & 0xff; + break; + case SENSOR_HV7131R: + gain[0] |= (2 << 4); + gain[2] = 0x30; + gain[3] = hv7131r_gain[sd->gain]; + break; + } + i2c_w(gspca_dev, gain); + return 0; +} + +static int sd_setbrightness(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->brightness = val; + if (gspca_dev->streaming) + return set_cmatrix(gspca_dev); + return 0; +} + +static int sd_getbrightness(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->brightness; + return 0; +} + + +static int sd_setcontrast(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) 
gspca_dev; + + sd->contrast = val; + if (gspca_dev->streaming) + return set_cmatrix(gspca_dev); + return 0; +} + +static int sd_getcontrast(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->contrast; + return 0; +} + +static int sd_setsaturation(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->saturation = val; + if (gspca_dev->streaming) + return set_cmatrix(gspca_dev); + return 0; +} + +static int sd_getsaturation(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->saturation; + return 0; +} + +static int sd_sethue(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->hue = val; + if (gspca_dev->streaming) + return set_cmatrix(gspca_dev); + return 0; +} + +static int sd_gethue(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->hue; + return 0; +} + +static int sd_setgamma(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->gamma = val; + if (gspca_dev->streaming) + return set_gamma(gspca_dev); + return 0; +} + +static int sd_getgamma(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->gamma; + return 0; +} + +static int sd_setredbalance(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->red = val; + if (gspca_dev->streaming) + return set_redblue(gspca_dev); + return 0; +} + +static int sd_getredbalance(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->red; + return 0; +} + +static int sd_setbluebalance(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->blue = val; + if (gspca_dev->streaming) + return set_redblue(gspca_dev); + return 0; +} + +static int sd_getbluebalance(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->blue; + return 0; +} + +static int sd_sethflip(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->hflip = val; + if (gspca_dev->streaming) + return set_hvflip(gspca_dev); + return 0; +} + +static int sd_gethflip(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->hflip; + return 0; +} + +static int sd_setvflip(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->vflip = val; + if (gspca_dev->streaming) + return set_hvflip(gspca_dev); + return 0; +} + +static int sd_getvflip(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->vflip; + return 0; +} + +static int sd_setexposure(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->exposure = val; + if (gspca_dev->streaming) + return set_exposure(gspca_dev); + return 0; +} + +static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->exposure; + return 0; +} + +static int sd_setgain(struct gspca_dev *gspca_dev, s32 val) +{ + struct sd *sd = (struct sd *) gspca_dev; + + sd->gain = val; + if (gspca_dev->streaming) + return set_gain(gspca_dev); + return 0; +} + +static int sd_getgain(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->gain; + return 0; +} + +static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val) +{ + struct 
sd *sd = (struct sd *) gspca_dev; + sd->auto_exposure = val; + return 0; +} + +static int sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val) +{ + struct sd *sd = (struct sd *) gspca_dev; + *val = sd->auto_exposure; + return 0; +} + +#ifdef CONFIG_VIDEO_ADV_DEBUG +static int sd_dbg_g_register(struct gspca_dev *gspca_dev, + struct v4l2_dbg_register *reg) +{ + struct sd *sd = (struct sd *) gspca_dev; + switch (reg->match.type) { + case V4L2_CHIP_MATCH_HOST: + if (reg->match.addr != 0) + return -EINVAL; + if (reg->reg < 0x1000 || reg->reg > 0x11ff) + return -EINVAL; + if (reg_r(gspca_dev, reg->reg, 1) < 0) + return -EINVAL; + reg->val = gspca_dev->usb_buf[0]; + return 0; + case V4L2_CHIP_MATCH_I2C_ADDR: + if (reg->match.addr != sd->i2c_addr) + return -EINVAL; + if (sd->sensor >= SENSOR_MT9V011 && + sd->sensor <= SENSOR_MT9M111) { + if (i2c_r2(gspca_dev, reg->reg, (u16 *)&reg->val) < 0) + return -EINVAL; + } else { + if (i2c_r1(gspca_dev, reg->reg, (u8 *)&reg->val) < 0) + return -EINVAL; + } + return 0; + } + return -EINVAL; +} + +static int sd_dbg_s_register(struct gspca_dev *gspca_dev, + struct v4l2_dbg_register *reg) +{ + struct sd *sd = (struct sd *) gspca_dev; + switch (reg->match.type) { + case V4L2_CHIP_MATCH_HOST: + if (reg->match.addr != 0) + return -EINVAL; + if (reg->reg < 0x1000 || reg->reg > 0x11ff) + return -EINVAL; + if (reg_w1(gspca_dev, reg->reg, reg->val) < 0) + return -EINVAL; + return 0; + case V4L2_CHIP_MATCH_I2C_ADDR: + if (reg->match.addr != sd->i2c_addr) + return -EINVAL; + if (sd->sensor >= SENSOR_MT9V011 && + sd->sensor <= SENSOR_MT9M111) { + if (i2c_w2(gspca_dev, reg->reg, reg->val) < 0) + return -EINVAL; + } else { + if (i2c_w1(gspca_dev, reg->reg, reg->val) < 0) + return -EINVAL; + } + return 0; + } + return -EINVAL; +} +#endif + +static int sd_chip_ident(struct gspca_dev *gspca_dev, + struct v4l2_dbg_chip_ident *chip) +{ + struct sd *sd = (struct sd *) gspca_dev; + + switch (chip->match.type) { + case V4L2_CHIP_MATCH_HOST: + if (chip->match.addr != 0) + return -EINVAL; + chip->revision = 0; + chip->ident = V4L2_IDENT_SN9C20X; + return 0; + case V4L2_CHIP_MATCH_I2C_ADDR: + if (chip->match.addr != sd->i2c_addr) + return -EINVAL; + chip->revision = 0; + chip->ident = i2c_ident[sd->sensor]; + return 0; + } + return -EINVAL; +} + +static int sd_config(struct gspca_dev *gspca_dev, + const struct usb_device_id *id) +{ + struct sd *sd = (struct sd *) gspca_dev; + struct cam *cam; + + cam = &gspca_dev->cam; + + sd->sensor = (id->driver_info >> 8) & 0xff; + sd->i2c_addr = id->driver_info & 0xff; + + switch (sd->sensor) { + case SENSOR_OV9650: + cam->cam_mode = sxga_mode; + cam->nmodes = ARRAY_SIZE(sxga_mode); + break; + default: + cam->cam_mode = vga_mode; + cam->nmodes = ARRAY_SIZE(vga_mode); + } + + sd->old_step = 0; + sd->older_step = 0; + sd->exposure_step = 16; + + sd->brightness = BRIGHTNESS_DEFAULT; + sd->contrast = CONTRAST_DEFAULT; + sd->saturation = SATURATION_DEFAULT; + sd->hue = HUE_DEFAULT; + sd->gamma = GAMMA_DEFAULT; + sd->red = RED_DEFAULT; + sd->blue = BLUE_DEFAULT; + + sd->hflip = HFLIP_DEFAULT; + sd->vflip = VFLIP_DEFAULT; + sd->exposure = EXPOSURE_DEFAULT; + sd->gain = GAIN_DEFAULT; + sd->auto_exposure = AUTO_EXPOSURE_DEFAULT; + + sd->quality = 95; + +#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV + sd->input_gpio = (id->driver_info >> 16) & 0xff; + if (sn9c20x_input_init(gspca_dev) < 0) + return -ENODEV; +#endif + return 0; +} + +static int sd_init(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + int i; + u8 value; + u8 i2c_init[9] =
+ {0x80, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}; + + for (i = 0; i < ARRAY_SIZE(bridge_init); i++) { + value = bridge_init[i][1]; + if (reg_w(gspca_dev, bridge_init[i][0], &value, 1) < 0) { + err("Device initialization failed"); + return -ENODEV; + } + } + + if (reg_w(gspca_dev, 0x10c0, i2c_init, 9) < 0) { + err("Device initialization failed"); + return -ENODEV; + } + + switch (sd->sensor) { + case SENSOR_OV9650: + if (ov9650_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("OV9650 sensor detected"); + break; + case SENSOR_OV9655: + if (ov9655_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("OV9655 sensor detected"); + break; + case SENSOR_SOI968: + if (soi968_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("SOI968 sensor detected"); + break; + case SENSOR_OV7660: + if (ov7660_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("OV7660 sensor detected"); + break; + case SENSOR_OV7670: + if (ov7670_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("OV7670 sensor detected"); + break; + case SENSOR_MT9VPRB: + if (mt9v_init_sensor(gspca_dev) < 0) + return -ENODEV; + break; + case SENSOR_MT9M111: + if (mt9m111_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("MT9M111 sensor detected"); + break; + case SENSOR_MT9M001: + if (mt9m001_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("MT9M001 sensor detected"); + break; + case SENSOR_HV7131R: + if (hv7131r_init_sensor(gspca_dev) < 0) + return -ENODEV; + info("HV7131R sensor detected"); + break; + default: + info("Unsupported Sensor"); + return -ENODEV; + } + + return 0; +} + +static void configure_sensor_output(struct gspca_dev *gspca_dev, int mode) +{ + struct sd *sd = (struct sd *) gspca_dev; + u8 value; + switch (sd->sensor) { + case SENSOR_OV9650: + if (mode & MODE_SXGA) { + i2c_w1(gspca_dev, 0x17, 0x1b); + i2c_w1(gspca_dev, 0x18, 0xbc); + i2c_w1(gspca_dev, 0x19, 0x01); + i2c_w1(gspca_dev, 0x1a, 0x82); + i2c_r1(gspca_dev, 0x12, &value); + i2c_w1(gspca_dev, 0x12, value & 0x07); + } else { + i2c_w1(gspca_dev, 0x17, 0x24); + i2c_w1(gspca_dev, 0x18, 0xc5); + i2c_w1(gspca_dev, 0x19, 0x00); + i2c_w1(gspca_dev, 0x1a, 0x3c); + i2c_r1(gspca_dev, 0x12, &value); + i2c_w1(gspca_dev, 0x12, (value & 0x7) | 0x40); + } + break; + } +} + +#define HW_WIN(mode, hstart, vstart) \ +((const u8 []){hstart & 0xff, hstart >> 8, \ +vstart & 0xff, vstart >> 8, \ +(mode & MODE_SXGA ? 1280 >> 4 : 640 >> 4), \ +(mode & MODE_SXGA ? 
1024 >> 3 : 480 >> 3)}) + +#define CLR_WIN(width, height) \ +((const u8 [])\ +{0, width >> 2, 0, height >> 1,\ +((width >> 10) & 0x01) | ((height >> 8) & 0x6)}) + +static int sd_start(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + int mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; + int width = gspca_dev->width; + int height = gspca_dev->height; + u8 fmt, scale = 0; + + sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (sd->jpeg_hdr == NULL) + return -ENOMEM; + + jpeg_define(sd->jpeg_hdr, height, width, + 0x21); + jpeg_set_qual(sd->jpeg_hdr, sd->quality); + + if (mode & MODE_RAW) + fmt = 0x2d; + else if (mode & MODE_JPEG) + fmt = 0x2c; + else + fmt = 0x2f; + + switch (mode & 0x0f) { + case 3: + scale = 0xc0; + info("Set 1280x1024"); + break; + case 2: + scale = 0x80; + info("Set 640x480"); + break; + case 1: + scale = 0x90; + info("Set 320x240"); + break; + case 0: + scale = 0xa0; + info("Set 160x120"); + break; + } + + configure_sensor_output(gspca_dev, mode); + reg_w(gspca_dev, 0x1100, sd->jpeg_hdr + JPEG_QT0_OFFSET, 64); + reg_w(gspca_dev, 0x1140, sd->jpeg_hdr + JPEG_QT1_OFFSET, 64); + reg_w(gspca_dev, 0x10fb, CLR_WIN(width, height), 5); + reg_w(gspca_dev, 0x1180, HW_WIN(mode, sd->hstart, sd->vstart), 6); + reg_w1(gspca_dev, 0x1189, scale); + reg_w1(gspca_dev, 0x10e0, fmt); + + set_cmatrix(gspca_dev); + set_gamma(gspca_dev); + set_redblue(gspca_dev); + set_gain(gspca_dev); + set_exposure(gspca_dev); + set_hvflip(gspca_dev); + + reg_r(gspca_dev, 0x1061, 1); + reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] | 0x02); + return 0; +} + +static void sd_stopN(struct gspca_dev *gspca_dev) +{ + reg_r(gspca_dev, 0x1061, 1); + reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] & ~0x02); +} + +static void sd_stop0(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + kfree(sd->jpeg_hdr); +} + +static void do_autoexposure(struct gspca_dev *gspca_dev) +{ + struct sd *sd = (struct sd *) gspca_dev; + int avg_lum, new_exp; + + if (!sd->auto_exposure) + return; + + avg_lum = atomic_read(&sd->avg_lum); + + /* + * some hardcoded values are present + * like those for maximal/minimal exposure + * and exposure steps + */ + if (avg_lum < MIN_AVG_LUM) { + if (sd->exposure > 0x1770) + return; + + new_exp = sd->exposure + sd->exposure_step; + if (new_exp > 0x1770) + new_exp = 0x1770; + if (new_exp < 0x10) + new_exp = 0x10; + sd->exposure = new_exp; + set_exposure(gspca_dev); + + sd->older_step = sd->old_step; + sd->old_step = 1; + + if (sd->old_step ^ sd->older_step) + sd->exposure_step /= 2; + else + sd->exposure_step += 2; + } + if (avg_lum > MAX_AVG_LUM) { + if (sd->exposure < 0x10) + return; + new_exp = sd->exposure - sd->exposure_step; + if (new_exp > 0x1700) + new_exp = 0x1770; + if (new_exp < 0x10) + new_exp = 0x10; + sd->exposure = new_exp; + set_exposure(gspca_dev); + sd->older_step = sd->old_step; + sd->old_step = 0; + + if (sd->old_step ^ sd->older_step) + sd->exposure_step /= 2; + else + sd->exposure_step += 2; + } +} + +static void sd_pkt_scan(struct gspca_dev *gspca_dev, + struct gspca_frame *frame, /* target */ + u8 *data, /* isoc packet */ + int len) /* iso packet length */ +{ + struct sd *sd = (struct sd *) gspca_dev; + int avg_lum; + static unsigned char frame_header[] = + {0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96}; + if (len == 64 && memcmp(data, frame_header, 6) == 0) { + avg_lum = ((data[35] >> 2) & 3) | + (data[20] << 2) | + (data[19] << 10); + avg_lum += ((data[35] >> 4) & 3) | + (data[22] << 2) | + (data[21] << 10); + 
avg_lum += ((data[35] >> 6) & 3) | + (data[24] << 2) | + (data[23] << 10); + avg_lum += (data[36] & 3) | + (data[26] << 2) | + (data[25] << 10); + avg_lum += ((data[36] >> 2) & 3) | + (data[28] << 2) | + (data[27] << 10); + avg_lum += ((data[36] >> 4) & 3) | + (data[30] << 2) | + (data[29] << 10); + avg_lum += ((data[36] >> 6) & 3) | + (data[32] << 2) | + (data[31] << 10); + avg_lum += ((data[44] >> 4) & 3) | + (data[34] << 2) | + (data[33] << 10); + avg_lum >>= 9; + atomic_set(&sd->avg_lum, avg_lum); + gspca_frame_add(gspca_dev, LAST_PACKET, + frame, data, len); + return; + } + if (gspca_dev->last_packet_type == LAST_PACKET) { + if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv + & MODE_JPEG) { + gspca_frame_add(gspca_dev, FIRST_PACKET, frame, + sd->jpeg_hdr, JPEG_HDR_SZ); + gspca_frame_add(gspca_dev, INTER_PACKET, frame, + data, len); + } else { + gspca_frame_add(gspca_dev, FIRST_PACKET, frame, + data, len); + } + } else { + gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len); + } +} + +/* sub-driver description */ +static const struct sd_desc sd_desc = { + .name = MODULE_NAME, + .ctrls = sd_ctrls, + .nctrls = ARRAY_SIZE(sd_ctrls), + .config = sd_config, + .init = sd_init, + .start = sd_start, + .stopN = sd_stopN, + .stop0 = sd_stop0, + .pkt_scan = sd_pkt_scan, + .dq_callback = do_autoexposure, +#ifdef CONFIG_VIDEO_ADV_DEBUG + .set_register = sd_dbg_s_register, + .get_register = sd_dbg_g_register, +#endif + .get_chip_ident = sd_chip_ident, +}; + +#define SN9C20X(sensor, i2c_addr, button_mask) \ + .driver_info = (button_mask << 16) \ + | (SENSOR_ ## sensor << 8) \ + | (i2c_addr) + +static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, 0x10)}, + {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6251), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6253), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6260), SN9C20X(OV7670, 0x21, 0)}, + {USB_DEVICE(0x0c45, 0x6270), SN9C20X(MT9VPRB, 0x00, 0)}, + {USB_DEVICE(0x0c45, 0x627b), SN9C20X(OV7660, 0x21, 0)}, + {USB_DEVICE(0x0c45, 0x627c), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0x0c45, 0x627f), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x6280), SN9C20X(MT9M001, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6282), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0x0c45, 0x6288), SN9C20X(OV9655, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x628e), SN9C20X(SOI968, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x628f), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x62a0), SN9C20X(OV7670, 0x21, 0)}, + {USB_DEVICE(0x0c45, 0x62b0), SN9C20X(MT9VPRB, 0x00, 0)}, + {USB_DEVICE(0x0c45, 0x62b3), SN9C20X(OV9655, 0x30, 0)}, + {USB_DEVICE(0x0c45, 0x62bb), SN9C20X(OV7660, 0x21, 0)}, + {USB_DEVICE(0x0c45, 0x62bc), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)}, + {USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)}, + {USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0611), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0613), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0618), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0xa168, 0x0614), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0xa168, 0x0615), SN9C20X(MT9M111, 0x5d, 0)}, + {USB_DEVICE(0xa168, 0x0617), 
SN9C20X(MT9M111, 0x5d, 0)}, + {} +}; +MODULE_DEVICE_TABLE(usb, device_table); + +/* -- device connect -- */ +static int sd_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), + THIS_MODULE); +} + +static void sd_disconnect(struct usb_interface *intf) +{ +#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV + struct gspca_dev *gspca_dev = usb_get_intfdata(intf); + + sn9c20x_input_cleanup(gspca_dev); +#endif + + gspca_disconnect(intf); +} + +static struct usb_driver sd_driver = { + .name = MODULE_NAME, + .id_table = device_table, + .probe = sd_probe, + .disconnect = sd_disconnect, +#ifdef CONFIG_PM + .suspend = gspca_suspend, + .resume = gspca_resume, + .reset_resume = gspca_resume, +#endif +}; + +/* -- module insert / remove -- */ +static int __init sd_mod_init(void) +{ + int ret; + ret = usb_register(&sd_driver); + if (ret < 0) + return ret; + info("registered"); + return 0; +} +static void __exit sd_mod_exit(void) +{ + usb_deregister(&sd_driver); + info("deregistered"); +} + +module_init(sd_mod_init); +module_exit(sd_mod_exit); diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c index 0d02f41..d6332ab 100644 --- a/drivers/media/video/gspca/sonixj.c +++ b/drivers/media/video/gspca/sonixj.c @@ -1634,6 +1634,8 @@ static void setfreq(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; + if (gspca_dev->ctrl_dis & (1 << FREQ_IDX)) + return; if (sd->sensor == SENSOR_OV7660) { switch (sd->freq) { case 0: /* Banding filter disabled */ @@ -1735,6 +1737,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x21); /* JPEG 422 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c index 8806b2f..fab7ef8 100644 --- a/drivers/media/video/gspca/spca500.c +++ b/drivers/media/video/gspca/spca500.c @@ -670,6 +670,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c index f25be20..4762896 100644 --- a/drivers/media/video/gspca/stk014.c +++ b/drivers/media/video/gspca/stk014.c @@ -333,6 +333,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c index 3039ec2..e5024c8 100644 --- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c +++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c @@ -64,7 +64,7 @@ static struct v4l2_pix_format hdcs1x00_mode[] = { { HDCS_1X00_DEF_WIDTH, HDCS_1X00_DEF_HEIGHT, - V4L2_PIX_FMT_SBGGR8, + V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .sizeimage = HDCS_1X00_DEF_WIDTH * HDCS_1X00_DEF_HEIGHT, @@ -80,7 +80,7 @@ static struct v4l2_pix_format hdcs1020_mode[] = { { HDCS_1020_DEF_WIDTH, HDCS_1020_DEF_HEIGHT, - V4L2_PIX_FMT_SBGGR8, + 
V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .sizeimage = HDCS_1020_DEF_WIDTH * HDCS_1020_DEF_HEIGHT, @@ -131,9 +131,11 @@ static int hdcs_reg_write_seq(struct sd *sd, u8 reg, u8 *vals, u8 len) (reg + len > 0xff))) return -EINVAL; - for (i = 0; i < len; i++, reg++) { - regs[2*i] = reg; - regs[2*i+1] = vals[i]; + for (i = 0; i < len; i++) { + regs[2 * i] = reg; + regs[2 * i + 1] = vals[i]; + /* All addresses are shifted left one bit as bit 0 toggles r/w */ + reg += 2; } return stv06xx_write_sensor_bytes(sd, regs, len); @@ -174,7 +176,9 @@ static int hdcs_set_state(struct sd *sd, enum hdcs_power_state state) } ret = stv06xx_write_sensor(sd, HDCS_REG_CONTROL(sd), val); - if (ret < 0) + + /* Update the state if the write succeeded */ + if (!ret) hdcs->state = state; return ret; diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c index 9623f29..5127bbf 100644 --- a/drivers/media/video/gspca/sunplus.c +++ b/drivers/media/video/gspca/sunplus.c @@ -973,6 +973,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x22); /* JPEG 411 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c index 08422d3..3d2756f 100644 --- a/drivers/media/video/gspca/zc3xx.c +++ b/drivers/media/video/gspca/zc3xx.c @@ -7243,6 +7243,8 @@ static int sd_start(struct gspca_dev *gspca_dev) /* create the JPEG header */ sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); + if (!sd->jpeg_hdr) + return -ENOMEM; jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x21); /* JPEG 422 */ jpeg_set_qual(sd->jpeg_hdr, sd->quality); diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c index 1fe8fc9..b2260de 100644 --- a/drivers/media/video/mt9v011.c +++ b/drivers/media/video/mt9v011.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include "mt9v011.h" #include @@ -57,6 +58,7 @@ static struct v4l2_queryctrl mt9v011_qctrl[] = { struct mt9v011 { struct v4l2_subdev sd; unsigned width, height; + unsigned xtal; u16 global_gain, red_bal, blue_bal; }; @@ -131,7 +133,7 @@ static const struct i2c_reg_value mt9v011_init_default[] = { { R1E_MT9V011_DIGITAL_ZOOM, 0x0000 }, { R20_MT9V011_READ_MODE, 0x1000 }, - { R07_MT9V011_OUT_CTRL, 0x000a }, /* chip enable */ + { R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */ }; static void set_balance(struct v4l2_subdev *sd) @@ -154,6 +156,31 @@ static void set_balance(struct v4l2_subdev *sd) mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain); } +static void calc_fps(struct v4l2_subdev *sd) +{ + struct mt9v011 *core = to_mt9v011(sd); + unsigned height, width, hblank, vblank, speed; + unsigned row_time, t_time; + u64 frames_per_ms; + unsigned tmp; + + height = mt9v011_read(sd, R03_MT9V011_HEIGHT); + width = mt9v011_read(sd, R04_MT9V011_WIDTH); + hblank = mt9v011_read(sd, R05_MT9V011_HBLANK); + vblank = mt9v011_read(sd, R06_MT9V011_VBLANK); + speed = mt9v011_read(sd, R0A_MT9V011_CLK_SPEED); + + row_time = (width + 113 + hblank) * (speed + 2); + t_time = row_time * (height + vblank + 1); + + frames_per_ms = core->xtal * 1000l; + do_div(frames_per_ms, t_time); + tmp = frames_per_ms; + + v4l2_dbg(1, debug, sd, "Programmed to %u.%03u fps (%d pixel clcks)\n", + tmp / 1000, tmp % 1000, t_time); +} + static void set_res(struct v4l2_subdev *sd) { struct mt9v011 *core = to_mt9v011(sd); @@ -175,10 
+202,12 @@ static void set_res(struct v4l2_subdev *sd) mt9v011_write(sd, R04_MT9V011_WIDTH, core->width); mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width); - vstart = 8 + (640 - core->height) / 2; + vstart = 8 + (480 - core->height) / 2; mt9v011_write(sd, R01_MT9V011_ROWSTART, vstart); mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height); mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height); + + calc_fps(sd); }; static int mt9v011_reset(struct v4l2_subdev *sd, u32 val) @@ -215,6 +244,23 @@ static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) return -EINVAL; } +static int mt9v011_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc) +{ + int i; + + v4l2_dbg(1, debug, sd, "queryctrl called\n"); + + for (i = 0; i < ARRAY_SIZE(mt9v011_qctrl); i++) + if (qc->id && qc->id == mt9v011_qctrl[i].id) { + memcpy(qc, &(mt9v011_qctrl[i]), + sizeof(*qc)); + return 0; + } + + return -EINVAL; +} + + static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct mt9v011 *core = to_mt9v011(sd); @@ -294,6 +340,22 @@ static int mt9v011_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt) return 0; } +static int mt9v011_s_config(struct v4l2_subdev *sd, int dumb, void *data) +{ + struct mt9v011 *core = to_mt9v011(sd); + unsigned *xtal = data; + + v4l2_dbg(1, debug, sd, "s_config called\n"); + + if (xtal) { + core->xtal = *xtal; + v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n", + *xtal / 1000000, (*xtal / 1000) % 1000); + } + + return 0; +} + #ifdef CONFIG_VIDEO_ADV_DEBUG static int mt9v011_g_register(struct v4l2_subdev *sd, @@ -338,9 +400,11 @@ static int mt9v011_g_chip_ident(struct v4l2_subdev *sd, } static const struct v4l2_subdev_core_ops mt9v011_core_ops = { + .queryctrl = mt9v011_queryctrl, .g_ctrl = mt9v011_g_ctrl, .s_ctrl = mt9v011_s_ctrl, .reset = mt9v011_reset, + .s_config = mt9v011_s_config, .g_chip_ident = mt9v011_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = mt9v011_g_register, @@ -395,6 +459,7 @@ static int mt9v011_probe(struct i2c_client *c, core->global_gain = 0x0024; core->width = 640; core->height = 480; + core->xtal = 27000000; /* Hz */ v4l_info(c, "chip found @ 0x%02x (%s)\n", c->addr << 1, c->adapter->name); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 68ab39d..4448a83 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -76,6 +76,34 @@ config IBM_ASM information on the specific driver level and support statement for your IBM server. +config HWLAT_DETECTOR + tristate "Testing module to detect hardware-induced latencies" + depends on DEBUG_FS + default m + ---help--- + A simple hardware latency detector. Use this module to detect + large latencies introduced by the behavior of the underlying + system firmware external to Linux. We do this using periodic + use of stop_machine to grab all available CPUs and measure + for unexplainable gaps in the CPU timestamp counter(s). By + default, the module is not enabled until the "enable" file + within the "hwlat_detector" debugfs directory is toggled. + + This module is often used to detect SMI (System Management + Interrupts) on x86 systems, though is not x86 specific. To + this end, we default to using a sample window of 1 second, + during which we will sample for 0.5 seconds. If an SMI or + similar event occurs during that time, it is recorded + into an 8K samples global ring buffer until retreived. 
+ + WARNING: This software should never be enabled (it can be built + but should not be turned on after it is loaded) in a production + environment where high latencies are a concern since the + sampling mechanism actually introduces latencies for + regular tasks while the CPU(s) are being held. + + If unsure, say N + config PHANTOM tristate "Sensable PHANToM (PCI)" depends on PCI diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 36f733c..854d9ff 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -22,3 +22,4 @@ obj-$(CONFIG_ISL29003) += isl29003.o obj-$(CONFIG_C2PORT) += c2port/ obj-y += eeprom/ obj-y += cb710/ +obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c new file mode 100644 index 0000000..e02d8e1 --- /dev/null +++ b/drivers/misc/hwlat_detector.c @@ -0,0 +1,1208 @@ +/* + * hwlat_detector.c - A simple Hardware Latency detector. + * + * Use this module to detect large system latencies induced by the behavior of + * certain underlying system hardware or firmware, independent of Linux itself. + * The code was developed originally to detect the presence of SMIs on Intel + * and AMD systems, although there is no dependency upon x86 herein. + * + * The classical example usage of this module is in detecting the presence of + * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a + * somewhat special form of hardware interrupt spawned from earlier CPU debug + * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge + * LPC (or other device) to generate a special interrupt under certain + * circumstances, for example, upon expiration of a special SMI timer device, + * due to certain external thermal readings, on certain I/O address accesses, + * and other situations. An SMI hits a special CPU pin, triggers a special + * SMI mode (complete with special memory map), and the OS is unaware. + * + * Although certain hardware-inducing latencies are necessary (for example, + * a modern system often requires an SMI handler for correct thermal control + * and remote management) they can wreak havoc upon any OS-level performance + * guarantees toward low-latency, especially when the OS is not even made + * aware of the presence of these interrupts. For this reason, we need a + * somewhat brute force mechanism to detect these interrupts. In this case, + * we do it by hogging all of the CPU(s) for configurable timer intervals, + * sampling the built-in CPU timer, looking for discontiguous readings. + * + * WARNING: This implementation necessarily introduces latencies. Therefore, + * you should NEVER use this module in a production environment + * requiring any kind of low-latency performance guarantee(s). + * + * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. + * + * Includes useful feedback from Clark Williams + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ +#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ +#define U64STR_SIZE 22 /* 20 digits max */ + +#define VERSION "1.0.0" +#define BANNER "hwlat_detector: " +#define DRVNAME "hwlat_detector" +#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */ +#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */ +#define DEFAULT_LAT_THRESHOLD 10 /* 10us */ + +/* Module metadata */ + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jon Masters "); +MODULE_DESCRIPTION("A simple hardware latency detector"); +MODULE_VERSION(VERSION); + +/* Module parameters */ + +static int debug; +static int enabled; +static int threshold; + +module_param(debug, int, 0); /* enable debug */ +module_param(enabled, int, 0); /* enable detector */ +module_param(threshold, int, 0); /* latency threshold */ + +/* Buffering and sampling */ + +static struct ring_buffer *ring_buffer; /* sample buffer */ +static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */ +static unsigned long buf_size = BUF_SIZE_DEFAULT; +static struct task_struct *kthread; /* sampling thread */ + +/* DebugFS filesystem entries */ + +static struct dentry *debug_dir; /* debugfs directory */ +static struct dentry *debug_max; /* maximum TSC delta */ +static struct dentry *debug_count; /* total detect count */ +static struct dentry *debug_sample_width; /* sample width us */ +static struct dentry *debug_sample_window; /* sample window us */ +static struct dentry *debug_sample; /* raw samples us */ +static struct dentry *debug_threshold; /* threshold us */ +static struct dentry *debug_enable; /* enable/disable */ + +/* Individual samples and global state */ + +struct sample; /* latency sample */ +struct data; /* Global state */ + +/* Sampling functions */ +static int __buffer_add_sample(struct sample *sample); +static struct sample *buffer_get_sample(struct sample *sample); +static int get_sample(void *unused); + +/* Threading and state */ +static int kthread_fn(void *unused); +static int start_kthread(void); +static int stop_kthread(void); +static void __reset_stats(void); +static int init_stats(void); + +/* Debugfs interface */ +static ssize_t simple_data_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos, const u64 *entry); +static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos, u64 *entry); +static int debug_sample_fopen(struct inode *inode, struct file *filp); +static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos); +static int debug_sample_release(struct inode *inode, struct file *filp); +static int debug_enable_fopen(struct inode *inode, struct file *filp); +static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos); +static ssize_t debug_enable_fwrite(struct file *file, + const char __user *user_buffer, + size_t user_size, loff_t *offset); + +/* Initialization functions */ +static int init_debugfs(void); +static void free_debugfs(void); +static int detector_init(void); +static void detector_exit(void); + +/* Individual latency samples are stored here when detected and packed into + * the ring_buffer circular buffer, where they are overwritten when + * more than buf_size/sizeof(sample) samples are received. 
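(Editorial illustration, not part of the patch: the "8K samples" figure implied above follows directly from the constants, assuming a 64-bit build where struct timespec is two 8-byte fields.)

	sizeof(struct sample)  = 8 (seqnum) + 8 (duration) + 16 (timestamp) = 32 bytes
	BUF_SIZE_DEFAULT       = 262144 bytes
	nominal capacity       = 262144 / 32 = 8192 samples

Once that capacity is exceeded the oldest entries are overwritten, as selected by BUF_FLAGS (RB_FL_OVERWRITE).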
*/ +struct sample { + u64 seqnum; /* unique sequence */ + u64 duration; /* ktime delta */ + struct timespec timestamp; /* wall time */ +}; + +/* keep the global state somewhere. Mostly used under stop_machine. */ +static struct data { + + struct mutex lock; /* protect changes */ + + u64 count; /* total since reset */ + u64 max_sample; /* max hardware latency */ + u64 threshold; /* sample threshold level */ + + u64 sample_window; /* total sampling window (on+off) */ + u64 sample_width; /* active sampling portion of window */ + + atomic_t sample_open; /* whether the sample file is open */ + + wait_queue_head_t wq; /* waitqeue for new sample values */ + +} data; + +/** + * __buffer_add_sample - add a new latency sample recording to the ring buffer + * @sample: The new latency sample value + * + * This receives a new latency sample and records it in a global ring buffer. + * No additional locking is used in this case - suited for stop_machine use. + */ +static int __buffer_add_sample(struct sample *sample) +{ + return ring_buffer_write(ring_buffer, + sizeof(struct sample), sample); +} + +/** + * buffer_get_sample - remove a hardware latency sample from the ring buffer + * @sample: Pre-allocated storage for the sample + * + * This retrieves a hardware latency sample from the global circular buffer + */ +static struct sample *buffer_get_sample(struct sample *sample) +{ + struct ring_buffer_event *e = NULL; + struct sample *s = NULL; + unsigned int cpu = 0; + + if (!sample) + return NULL; + + /* ring_buffers are per-cpu but we just want any value */ + /* so we'll start with this cpu and try others if not */ + /* Steven is planning to add a generic mechanism */ + mutex_lock(&ring_buffer_mutex); + e = ring_buffer_consume(ring_buffer, smp_processor_id(), NULL); + if (!e) { + for_each_online_cpu(cpu) { + e = ring_buffer_consume(ring_buffer, cpu, NULL); + if (e) + break; + } + } + + if (e) { + s = ring_buffer_event_data(e); + memcpy(sample, s, sizeof(struct sample)); + } else + sample = NULL; + mutex_unlock(&ring_buffer_mutex); + + return sample; +} + +/** + * get_sample - sample the CPU TSC and look for likely hardware latencies + * @unused: This is not used but is a part of the stop_machine API + * + * Used to repeatedly capture the CPU TSC (or similar), looking for potential + * hardware-induced latency. Called under stop_machine, with data.lock held. 
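(Editorial worked example, not part of the patch, using the default constants defined above to make the timing concrete.)

	sample_width  = 500000 us  -> each get_sample() call busy-polls for ~0.5 s,
	                              keeping only the largest t2 - t1 gap it sees
	sample_window = 1000000 us -> kthread_fn() then sleeps for
	                              (1000000 - 500000) / 1000 = 500 ms
	                              before the next stop_machine sample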
+ */ +static int get_sample(void *unused) +{ + ktime_t start, t1, t2; + s64 diff, total = 0; + u64 sample = 0; + int ret = 1; + + start = ktime_get(); /* start timestamp */ + + do { + + t1 = ktime_get(); /* we'll look for a discontinuity */ + t2 = ktime_get(); + + total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ + diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ + + /* This shouldn't happen */ + if (diff < 0) { + printk(KERN_ERR BANNER "time running backwards\n"); + goto out; + } + + if (diff > sample) + sample = diff; /* only want highest value */ + + } while (total <= data.sample_width); + + /* If we exceed the threshold value, we have found a hardware latency */ + if (sample > data.threshold) { + struct sample s; + + data.count++; + s.seqnum = data.count; + s.duration = sample; + s.timestamp = CURRENT_TIME; + __buffer_add_sample(&s); + + /* Keep a running maximum ever recorded hardware latency */ + if (sample > data.max_sample) + data.max_sample = sample; + + wake_up(&data.wq); /* wake up reader(s) */ + } + + ret = 0; +out: + return ret; +} + +/* + * kthread_fn - The CPU time sampling/hardware latency detection kernel thread + * @unused: A required part of the kthread API. + * + * Used to periodically sample the CPU TSC via a call to get_sample. We + * use stop_machine, which does (intentionally) introduce latency since we + * need to ensure nothing else might be running (and thus pre-empting). + * Obviously this should never be used in production environments. + * + * stop_machine will schedule us typically only on CPU0 which is fine for + * almost every real-world hardware latency situation - but we might later + * generalize this if we find there are any actual systems with alternate + * SMI delivery or other non CPU0 hardware latencies. + */ +static int kthread_fn(void *unused) +{ + int err = 0; + u64 interval = 0; + + while (!kthread_should_stop()) { + + mutex_lock(&data.lock); + + err = stop_machine(get_sample, unused, 0); + if (err) { + /* Houston, we have a problem */ + mutex_unlock(&data.lock); + goto err_out; + } + + interval = data.sample_window - data.sample_width; + do_div(interval, USEC_PER_MSEC); /* modifies interval value */ + + mutex_unlock(&data.lock); + + if (msleep_interruptible(interval)) + goto out; + } + goto out; +err_out: + printk(KERN_ERR BANNER "could not call stop_machine, disabling\n"); + enabled = 0; +out: + return err; + +} + +/** + * start_kthread - Kick off the hardware latency sampling/detector kthread + * + * This starts a kernel thread that will sit and sample the CPU timestamp + * counter (TSC or similar) and look for potential hardware latencies. + */ +static int start_kthread(void) +{ + kthread = kthread_run(kthread_fn, NULL, + DRVNAME); + if (IS_ERR(kthread)) { + printk(KERN_ERR BANNER "could not start sampling thread\n"); + enabled = 0; + return -ENOMEM; + } + + return 0; +} + +/** + * stop_kthread - Inform the hardware latency sampling/detector kthread to stop + * + * This kicks the running hardware latency sampling/detector kernel thread and + * tells it to stop sampling now. Use this on unload and at system shutdown. + */ +static int stop_kthread(void) +{ + int ret; + + ret = kthread_stop(kthread); + + return ret; +} + +/** + * __reset_stats - Reset statistics for the hardware latency detector + * + * We use data to store various statistics and global state. We call this + * function in order to reset those when "enable" is toggled on or off, and + * also at initialization. Should be called with data.lock held.
+ */ +static void __reset_stats(void) +{ + data.count = 0; + data.max_sample = 0; + ring_buffer_reset(ring_buffer); /* flush out old sample entries */ +} + +/** + * init_stats - Setup global state statistics for the hardware latency detector + * + * We use data to store various statistics and global state. We also use + * a global ring buffer (ring_buffer) to keep raw samples of detected hardware + * induced system latencies. This function initializes these structures and + * allocates the global ring buffer also. + */ +static int init_stats(void) +{ + int ret = -ENOMEM; + + mutex_init(&data.lock); + init_waitqueue_head(&data.wq); + atomic_set(&data.sample_open, 0); + + ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS); + + if (WARN(!ring_buffer, KERN_ERR BANNER + "failed to allocate ring buffer!\n")) + goto out; + + __reset_stats(); + data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */ + data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ + data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ + + ret = 0; + +out: + return ret; + +} + +/* + * simple_data_read - Wrapper read function for global state debugfs entries + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * @entry: The entry to read from + * + * This function provides a generic read implementation for the global state + * "data" structure debugfs filesystem entries. It would be nice to use + * simple_attr_read directly, but we need to make sure that the data.lock + * spinlock is held during the actual read (even though we likely won't ever + * actually race here as the updater runs under a stop_machine context). + */ +static ssize_t simple_data_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos, const u64 *entry) +{ + char buf[U64STR_SIZE]; + u64 val = 0; + int len = 0; + + memset(buf, 0, sizeof(buf)); + + if (!entry) + return -EFAULT; + + mutex_lock(&data.lock); + val = *entry; + mutex_unlock(&data.lock); + + len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); + +} + +/* + * simple_data_write - Wrapper write function for global state debugfs entries + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to write value from + * @cnt: The maximum number of bytes to write + * @ppos: The current "file" position + * @entry: The entry to write to + * + * This function provides a generic write implementation for the global state + * "data" structure debugfs filesystem entries. It would be nice to use + * simple_attr_write directly, but we need to make sure that the data.lock + * spinlock is held during the actual write (even though we likely won't ever + * actually race here as the updater runs under a stop_machine context). 
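As a minimal user-space sketch of how these plain numeric debugfs entries are driven (editorial example, not part of the patch; it assumes debugfs is mounted at /sys/kernel/debug, so adjust the path if your system mounts it elsewhere):

	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		FILE *f;

		/* raise the latency threshold to 100 us */
		f = fopen("/sys/kernel/debug/hwlat_detector/threshold", "w");
		if (!f)
			return 1;
		fprintf(f, "100\n");
		fclose(f);

		/* read back how many spikes have been recorded so far */
		f = fopen("/sys/kernel/debug/hwlat_detector/count", "r");
		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("spikes: %s", buf);
		fclose(f);
		return 0;
	}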
+ */ +static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos, u64 *entry) +{ + char buf[U64STR_SIZE]; + int csize = min(cnt, sizeof(buf)); + u64 val = 0; + int err = 0; + + memset(buf, '\0', sizeof(buf)); + if (copy_from_user(buf, ubuf, csize)) + return -EFAULT; + + buf[U64STR_SIZE-1] = '\0'; /* just in case */ + err = strict_strtoull(buf, 10, &val); + if (err) + return -EINVAL; + + mutex_lock(&data.lock); + *entry = val; + mutex_unlock(&data.lock); + + return csize; +} + +/** + * debug_count_fopen - Open function for "count" debugfs entry + * @inode: The in-kernel inode representation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function provides an open implementation for the "count" debugfs + * interface to the hardware latency detector. + */ +static int debug_count_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + +/** + * debug_count_fread - Read function for "count" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function provides a read implementation for the "count" debugfs + * interface to the hardware latency detector. Can be used to read the + * number of latency readings exceeding the configured threshold since + * the detector was last reset (e.g. by writing a zero into "count"). + */ +static ssize_t debug_count_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return simple_data_read(filp, ubuf, cnt, ppos, &data.count); +} + +/** + * debug_count_fwrite - Write function for "count" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in the debugfs "file" + * + * This function provides a write implementation for the "count" debugfs + * interface to the hardware latency detector. Can be used to write a + * desired value, especially to zero the total count. + */ +static ssize_t debug_count_fwrite(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + return simple_data_write(filp, ubuf, cnt, ppos, &data.count); +} + +/** + * debug_enable_fopen - Dummy open function for "enable" debugfs interface + * @inode: The in-kernel inode representation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function provides an open implementation for the "enable" debugfs + * interface to the hardware latency detector. + */ +static int debug_enable_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + +/** + * debug_enable_fread - Read function for "enable" debugfs interface + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function provides a read implementation for the "enable" debugfs + * interface to the hardware latency detector. Can be used to determine + * whether the detector is currently enabled ("0\n" or "1\n" returned). + */ +static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[4]; + + if ((cnt < sizeof(buf)) || (*ppos)) + return 0; + + buf[0] = enabled ? 
'1' : '0'; + buf[1] = '\n'; + buf[2] = '\0'; + if (copy_to_user(ubuf, buf, strlen(buf))) + return -EFAULT; + return *ppos = strlen(buf); +} + +/** + * debug_enable_fwrite - Write function for "enable" debugfs interface + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in the debugfs "file" + * + * This function provides a write implementation for the "enable" debugfs + * interface to the hardware latency detector. Can be used to enable or + * disable the detector, which will have the side-effect of possibly + * also resetting the global stats and kicking off the measuring + * kthread (on an enable) or the converse (upon a disable). + */ +static ssize_t debug_enable_fwrite(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + char buf[4]; + int csize = min(cnt, sizeof(buf)); + long val = 0; + int err = 0; + + memset(buf, '\0', sizeof(buf)); + if (copy_from_user(buf, ubuf, csize)) + return -EFAULT; + + buf[sizeof(buf)-1] = '\0'; /* just in case */ + err = strict_strtoul(buf, 10, &val); + if (0 != err) + return -EINVAL; + + if (val) { + if (enabled) + goto unlock; + enabled = 1; + __reset_stats(); + if (start_kthread()) + return -EFAULT; + } else { + if (!enabled) + goto unlock; + enabled = 0; + stop_kthread(); + wake_up(&data.wq); /* reader(s) should return */ + } +unlock: + return csize; +} + +/** + * debug_max_fopen - Open function for "max" debugfs entry + * @inode: The in-kernel inode representation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function provides an open implementation for the "max" debugfs + * interface to the hardware latency detector. + */ +static int debug_max_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + +/** + * debug_max_fread - Read function for "max" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function provides a read implementation for the "max" debugfs + * interface to the hardware latency detector. Can be used to determine + * the maximum latency value observed since it was last reset. + */ +static ssize_t debug_max_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample); +} + +/** + * debug_max_fwrite - Write function for "max" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in the debugfs "file" + * + * This function provides a write implementation for the "max" debugfs + * interface to the hardware latency detector. Can be used to reset the + * maximum or set it to some other desired value - if, then, subsequent + * measurements exceed this value, the maximum will be updated. 
+ */ +static ssize_t debug_max_fwrite(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample); +} + + +/** + * debug_sample_fopen - An open function for "sample" debugfs interface + * @inode: The in-kernel inode representation of this debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function handles opening the "sample" file within the hardware + * latency detector debugfs directory interface. This file is used to read + * raw samples from the global ring_buffer and allows the user to see a + * running latency history. Can be opened blocking or non-blocking, + * affecting whether it behaves as a buffer read pipe, or does not. + * Implements simple locking to prevent multiple simultaneous use. + */ +static int debug_sample_fopen(struct inode *inode, struct file *filp) +{ + if (!atomic_add_unless(&data.sample_open, 1, 1)) + return -EBUSY; + else + return 0; +} + +/** + * debug_sample_fread - A read function for "sample" debugfs interface + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that will contain the samples read + * @cnt: The maximum bytes to read from the debugfs "file" + * @ppos: The current position in the debugfs "file" + * + * This function handles reading from the "sample" file within the hardware + * latency detector debugfs directory interface. This file is used to read + * raw samples from the global ring_buffer and allows the user to see a + * running latency history. By default this will block pending a new + * value written into the sample buffer, unless there are already a + * number of value(s) waiting in the buffer, or the sample file was + * previously opened in a non-blocking mode of operation. + */ +static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + int len = 0; + char buf[64]; + struct sample *sample = NULL; + + if (!enabled) + return 0; + + sample = kzalloc(sizeof(struct sample), GFP_KERNEL); + if (!sample) + return -ENOMEM; + + while (!buffer_get_sample(sample)) { + + DEFINE_WAIT(wait); + + if (filp->f_flags & O_NONBLOCK) { + len = -EAGAIN; + goto out; + } + + prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE); + schedule(); + finish_wait(&data.wq, &wait); + + if (signal_pending(current)) { + len = -EINTR; + goto out; + } + + if (!enabled) { /* enable was toggled */ + len = 0; + goto out; + } + } + + len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n", + sample->timestamp.tv_sec, + sample->timestamp.tv_nsec, + sample->duration); + + + /* handling partial reads is more trouble than it's worth */ + if (len > cnt) + goto out; + + if (copy_to_user(ubuf, buf, len)) + len = -EFAULT; + +out: + kfree(sample); + return len; +} + +/** + * debug_sample_release - Release function for "sample" debugfs interface + * @inode: The in-kernel inode represenation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function completes the close of the debugfs interface "sample" file. + * Frees the sample_open "lock" so that other users may open the interface. 
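For reference, each record that debug_sample_fread() above emits has the form "seconds.nanoseconds<TAB>duration", so a reader of the "sample" file sees lines such as (illustrative values only):

	1250496061.0007648264	14
	1250496061.0503726415	327

i.e. the zero-padded wall-clock timestamp of the spike followed by its duration in microseconds.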
+ */ +static int debug_sample_release(struct inode *inode, struct file *filp) +{ + atomic_dec(&data.sample_open); + + return 0; +} + +/** + * debug_threshold_fopen - Open function for "threshold" debugfs entry + * @inode: The in-kernel inode representation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function provides an open implementation for the "threshold" debugfs + * interface to the hardware latency detector. + */ +static int debug_threshold_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + +/** + * debug_threshold_fread - Read function for "threshold" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function provides a read implementation for the "threshold" debugfs + * interface to the hardware latency detector. It can be used to determine + * the current threshold level at which a latency will be recorded in the + * global ring buffer, typically on the order of 10us. + */ +static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold); +} + +/** + * debug_threshold_fwrite - Write function for "threshold" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in the debugfs "file" + * + * This function provides a write implementation for the "threshold" debugfs + * interface to the hardware latency detector. It can be used to configure + * the threshold level at which any subsequently detected latencies will + * be recorded into the global ring buffer. + */ +static ssize_t debug_threshold_fwrite(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + int ret; + + ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold); + + if (enabled) + wake_up_process(kthread); + + return ret; +} + +/** + * debug_width_fopen - Open function for "width" debugfs entry + * @inode: The in-kernel inode representation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function provides an open implementation for the "width" debugfs + * interface to the hardware latency detector. + */ +static int debug_width_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + +/** + * debug_width_fread - Read function for "width" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function provides a read implementation for the "width" debugfs + * interface to the hardware latency detector. It can be used to determine + * for how many us of the total window us we will actively sample for any + * hardware-induced latecy periods. Obviously, it is not possible to + * sample constantly and have the system respond to a sample reader, or, + * worse, without having the system appear to have gone out to lunch. 
+ */ +static ssize_t debug_width_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width); +} + +/** + * debug_width_fwrite - Write function for "width" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in the debugfs "file" + * + * This function provides a write implementation for the "width" debugfs + * interface to the hardware latency detector. It can be used to configure + * for how many us of the total window us we will actively sample for any + * hardware-induced latency periods. Obviously, it is not possible to + * sample constantly and have the system respond to a sample reader, or, + * worse, without having the system appear to have gone out to lunch. It + * is enforced that width is less that the total window size. + */ +static ssize_t debug_width_fwrite(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + char buf[U64STR_SIZE]; + int csize = min(cnt, sizeof(buf)); + u64 val = 0; + int err = 0; + + memset(buf, '\0', sizeof(buf)); + if (copy_from_user(buf, ubuf, csize)) + return -EFAULT; + + buf[U64STR_SIZE-1] = '\0'; /* just in case */ + err = strict_strtoull(buf, 10, &val); + if (0 != err) + return -EINVAL; + + mutex_lock(&data.lock); + if (val < data.sample_window) + data.sample_width = val; + else { + mutex_unlock(&data.lock); + return -EINVAL; + } + mutex_unlock(&data.lock); + + if (enabled) + wake_up_process(kthread); + + return csize; +} + +/** + * debug_window_fopen - Open function for "window" debugfs entry + * @inode: The in-kernel inode representation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function provides an open implementation for the "window" debugfs + * interface to the hardware latency detector. The window is the total time + * in us that will be considered one sample period. Conceptually, windows + * occur back-to-back and contain a sample width period during which + * actual sampling occurs. + */ +static int debug_window_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + +/** + * debug_window_fread - Read function for "window" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function provides a read implementation for the "window" debugfs + * interface to the hardware latency detector. The window is the total time + * in us that will be considered one sample period. Conceptually, windows + * occur back-to-back and contain a sample width period during which + * actual sampling occurs. Can be used to read the total window size. 
+ */ +static ssize_t debug_window_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window); +} + +/** + * debug_window_fwrite - Write function for "window" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in the debugfs "file" + * + * This function provides a write implementation for the "window" debufds + * interface to the hardware latency detetector. The window is the total time + * in us that will be considered one sample period. Conceptually, windows + * occur back-to-back and contain a sample width period during which + * actual sampling occurs. Can be used to write a new total window size. It + * is enfoced that any value written must be greater than the sample width + * size, or an error results. + */ +static ssize_t debug_window_fwrite(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + char buf[U64STR_SIZE]; + int csize = min(cnt, sizeof(buf)); + u64 val = 0; + int err = 0; + + memset(buf, '\0', sizeof(buf)); + if (copy_from_user(buf, ubuf, csize)) + return -EFAULT; + + buf[U64STR_SIZE-1] = '\0'; /* just in case */ + err = strict_strtoull(buf, 10, &val); + if (0 != err) + return -EINVAL; + + mutex_lock(&data.lock); + if (data.sample_width < val) + data.sample_window = val; + else { + mutex_unlock(&data.lock); + return -EINVAL; + } + mutex_unlock(&data.lock); + + return csize; +} + +/* + * Function pointers for the "count" debugfs file operations + */ +static const struct file_operations count_fops = { + .open = debug_count_fopen, + .read = debug_count_fread, + .write = debug_count_fwrite, + .owner = THIS_MODULE, +}; + +/* + * Function pointers for the "enable" debugfs file operations + */ +static const struct file_operations enable_fops = { + .open = debug_enable_fopen, + .read = debug_enable_fread, + .write = debug_enable_fwrite, + .owner = THIS_MODULE, +}; + +/* + * Function pointers for the "max" debugfs file operations + */ +static const struct file_operations max_fops = { + .open = debug_max_fopen, + .read = debug_max_fread, + .write = debug_max_fwrite, + .owner = THIS_MODULE, +}; + +/* + * Function pointers for the "sample" debugfs file operations + */ +static const struct file_operations sample_fops = { + .open = debug_sample_fopen, + .read = debug_sample_fread, + .release = debug_sample_release, + .owner = THIS_MODULE, +}; + +/* + * Function pointers for the "threshold" debugfs file operations + */ +static const struct file_operations threshold_fops = { + .open = debug_threshold_fopen, + .read = debug_threshold_fread, + .write = debug_threshold_fwrite, + .owner = THIS_MODULE, +}; + +/* + * Function pointers for the "width" debugfs file operations + */ +static const struct file_operations width_fops = { + .open = debug_width_fopen, + .read = debug_width_fread, + .write = debug_width_fwrite, + .owner = THIS_MODULE, +}; + +/* + * Function pointers for the "window" debugfs file operations + */ +static const struct file_operations window_fops = { + .open = debug_window_fopen, + .read = debug_window_fread, + .write = debug_window_fwrite, + .owner = THIS_MODULE, +}; + +/** + * init_debugfs - A function to initialize the debugfs interface files + * + * This function creates entries in debugfs for "hwlat_detector", including + * files to read values from the detector, current samples, and 
the + * maximum sample that has been captured since the hardware latency + * dectector was started. + */ +static int init_debugfs(void) +{ + int ret = -ENOMEM; + + debug_dir = debugfs_create_dir(DRVNAME, NULL); + if (!debug_dir) + goto err_debug_dir; + + debug_sample = debugfs_create_file("sample", 0444, + debug_dir, NULL, + &sample_fops); + if (!debug_sample) + goto err_sample; + + debug_count = debugfs_create_file("count", 0444, + debug_dir, NULL, + &count_fops); + if (!debug_count) + goto err_count; + + debug_max = debugfs_create_file("max", 0444, + debug_dir, NULL, + &max_fops); + if (!debug_max) + goto err_max; + + debug_sample_window = debugfs_create_file("window", 0644, + debug_dir, NULL, + &window_fops); + if (!debug_sample_window) + goto err_window; + + debug_sample_width = debugfs_create_file("width", 0644, + debug_dir, NULL, + &width_fops); + if (!debug_sample_width) + goto err_width; + + debug_threshold = debugfs_create_file("threshold", 0644, + debug_dir, NULL, + &threshold_fops); + if (!debug_threshold) + goto err_threshold; + + debug_enable = debugfs_create_file("enable", 0644, + debug_dir, &enabled, + &enable_fops); + if (!debug_enable) + goto err_enable; + + else { + ret = 0; + goto out; + } + +err_enable: + debugfs_remove(debug_threshold); +err_threshold: + debugfs_remove(debug_sample_width); +err_width: + debugfs_remove(debug_sample_window); +err_window: + debugfs_remove(debug_max); +err_max: + debugfs_remove(debug_count); +err_count: + debugfs_remove(debug_sample); +err_sample: + debugfs_remove(debug_dir); +err_debug_dir: +out: + return ret; +} + +/** + * free_debugfs - A function to cleanup the debugfs file interface + */ +static void free_debugfs(void) +{ + /* could also use a debugfs_remove_recursive */ + debugfs_remove(debug_enable); + debugfs_remove(debug_threshold); + debugfs_remove(debug_sample_width); + debugfs_remove(debug_sample_window); + debugfs_remove(debug_max); + debugfs_remove(debug_count); + debugfs_remove(debug_sample); + debugfs_remove(debug_dir); +} + +/** + * detector_init - Standard module initialization code + */ +static int detector_init(void) +{ + int ret = -ENOMEM; + + printk(KERN_INFO BANNER "version %s\n", VERSION); + + ret = init_stats(); + if (0 != ret) + goto out; + + ret = init_debugfs(); + if (0 != ret) + goto err_stats; + + if (enabled) + ret = start_kthread(); + + goto out; + +err_stats: + ring_buffer_free(ring_buffer); +out: + return ret; + +} + +/** + * detector_exit - Standard module cleanup code + */ +static void detector_exit(void) +{ + if (enabled) { + enabled = 0; + stop_kthread(); + } + + free_debugfs(); + ring_buffer_free(ring_buffer); /* free up the ring buffer */ + +} + +module_init(detector_init); +module_exit(detector_exit); diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 49e5823..8df561b 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -194,7 +194,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock sg_init_table(mq->sg, host->max_phys_segs); } - init_MUTEX(&mq->thread_sem); + semaphore_init(&mq->thread_sem); mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd"); if (IS_ERR(mq->thread)) { diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index aaa8a9f..1b28ef5 100644 --- a/drivers/net/3c527.c +++ b/drivers/net/3c527.c @@ -179,7 +179,7 @@ struct mc32_local u16 rx_ring_tail; /* index to rx de-queue end */ - struct semaphore cmd_mutex; /* Serialises issuing of execute commands */ + struct anon_semaphore cmd_mutex; /* Serialises issuing of 
execute commands */ struct completion execution_cmd; /* Card has completed an execute command */ struct completion xceiver_cmd; /* Card has completed a tx or rx command */ }; @@ -521,7 +521,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot) lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */ lp->rx_len = lp->exec_box->data[11]; /* Receive list count */ - init_MUTEX_LOCKED(&lp->cmd_mutex); + anon_semaphore_init_locked(&lp->cmd_mutex); init_completion(&lp->execution_cmd); init_completion(&lp->xceiver_cmd); @@ -580,7 +580,7 @@ static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int int ioaddr = dev->base_addr; int ret = -1; - if (down_trylock(&lp->cmd_mutex) == 0) + if (anon_down_trylock(&lp->cmd_mutex) == 0) { lp->cmd_nonblocking=1; lp->exec_box->mbox=0; @@ -626,7 +626,7 @@ static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len) int ioaddr = dev->base_addr; int ret = 0; - down(&lp->cmd_mutex); + anon_down(&lp->cmd_mutex); /* * My Turn @@ -646,7 +646,7 @@ static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len) if(lp->exec_box->mbox&(1<<13)) ret = -1; - up(&lp->cmd_mutex); + anon_up(&lp->cmd_mutex); /* * A multicast set got blocked - try it now @@ -916,7 +916,7 @@ static int mc32_open(struct net_device *dev) * Allow ourselves to issue commands */ - up(&lp->cmd_mutex); + anon_up(&lp->cmd_mutex); /* @@ -1384,7 +1384,7 @@ static irqreturn_t mc32_interrupt(int irq, void *dev_id) */ if (lp->cmd_nonblocking) { - up(&lp->cmd_mutex); + anon_up(&lp->cmd_mutex); if (lp->mc_reload_wait) mc32_reset_multicast_list(dev); } @@ -1461,7 +1461,7 @@ static int mc32_close(struct net_device *dev) /* Ensure we issue no more commands beyond this point */ - down(&lp->cmd_mutex); + anon_down(&lp->cmd_mutex); /* Ok the card is now stopping */ diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index c34aee9..ee071b3 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -791,9 +791,9 @@ static void poll_vortex(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); } #endif @@ -1762,6 +1762,7 @@ vortex_timer(unsigned long data) int next_tick = 60*HZ; int ok = 0; int media_status, old_window; + unsigned long flags; if (vortex_debug > 2) { pr_debug("%s: Media selection timer tick happened, %s.\n", @@ -1769,7 +1770,7 @@ vortex_timer(unsigned long data) pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo); } - disable_irq_lockdep(dev->irq); + spin_lock_irqsave(&vp->lock, flags); old_window = ioread16(ioaddr + EL3_CMD) >> 13; EL3WINDOW(4); media_status = ioread16(ioaddr + Wn4_Media); @@ -1792,10 +1793,7 @@ vortex_timer(unsigned long data) case XCVR_MII: case XCVR_NWAY: { ok = 1; - /* Interrupts are already disabled */ - spin_lock(&vp->lock); vortex_check_media(dev, 0); - spin_unlock(&vp->lock); } break; default: /* Other media types handled by Tx timeouts. 
*/ @@ -1849,7 +1847,7 @@ leave_media_alone: dev->name, media_tbl[dev->if_port].name); EL3WINDOW(old_window); - enable_irq_lockdep(dev->irq); + spin_unlock_irqrestore(&vp->lock, flags); mod_timer(&vp->timer, RUN_AT(next_tick)); if (vp->deferred) iowrite16(FakeIntr, ioaddr + EL3_CMD); @@ -1883,12 +1881,12 @@ static void vortex_tx_timeout(struct net_device *dev) * Block interrupts because vortex_interrupt does a bare spin_lock() */ unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); if (vp->full_bus_master_tx) boomerang_interrupt(dev->irq, dev); else vortex_interrupt(dev->irq, dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); } } diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 0e2ba21..560c233 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -2195,7 +2195,11 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) */ static void rtl8139_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); + /* + * use _nosync() variant - might be used by netconsole + * from atomic contexts: + */ + disable_irq_nosync(dev->irq); rtl8139_interrupt(dev->irq, dev); enable_irq(dev->irq); } diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index b5a7513..5f6509a 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1732,6 +1732,7 @@ config KS8842 config KS8851 tristate "Micrel KS8851 SPI" depends on SPI + select MII help SPI driver for Micrel KS8851 SPI attached network chip. diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index 2e7419a..5041d10 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c @@ -1228,7 +1228,6 @@ static int at91ether_resume(struct platform_device *pdev) #endif static struct platform_driver at91ether_driver = { - .probe = at91ether_probe, .remove = __devexit_p(at91ether_remove), .suspend = at91ether_suspend, .resume = at91ether_resume, @@ -1240,7 +1239,7 @@ static struct platform_driver at91ether_driver = { static int __init at91ether_init(void) { - return platform_driver_register(&at91ether_driver); + return platform_driver_probe(&at91ether_driver, at91ether_probe); } static void __exit at91ether_exit(void) diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index 18b566a..cf30e27 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c @@ -318,7 +318,7 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr) pos3 = mca_read_stored_pos( slot, 3 ); pos4 = mca_read_stored_pos( slot, 4 ); - for (l_i = 0; l_i < 0x09; l_i++) + for (l_i = 0; l_i < 8; l_i++) if (( pos3 & 0x07) == at1700_ioaddr_pattern[l_i]) break; ioaddr = at1700_mca_probe_list[l_i]; diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index a383122..74cf8f5 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -2069,11 +2069,8 @@ static int atl1c_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } tpd_req = atl1c_cal_tpd_req(skb); - if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { - if (netif_msg_pktdata(adapter)) - dev_info(&adapter->pdev->dev, "tx locked\n"); - return NETDEV_TX_LOCKED; - } + spin_lock_irqsave(&adapter->tx_lock, flags); + if (skb->mark == 0x01) type = atl1c_trans_high; else diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c index 9fc6d6d..4f6df51 100644 --- a/drivers/net/atl1e/atl1e_main.c +++ b/drivers/net/atl1e/atl1e_main.c @@ -1856,8 +1856,7 @@ static int atl1e_xmit_frame(struct sk_buff *skb, struct net_device *netdev) return 
NETDEV_TX_OK; } tpd_req = atl1e_cal_tdp_req(skb); - if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) - return NETDEV_TX_LOCKED; + spin_lock_irqsave(&adapter->tx_lock, flags); if (atl1e_tpd_avail(adapter) < tpd_req) { /* no enough descriptor, just stop queue */ diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index c43f6a1..dea3155 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -667,7 +667,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_queue_info *rxq = &adapter->rx_obj.q; struct be_rx_page_info *page_info; u16 rxq_idx, i, num_rcvd, j; - u32 pktsize, hdr_len, curr_frag_len; + u32 pktsize, hdr_len, curr_frag_len, size; u8 *start; rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); @@ -708,12 +708,13 @@ static void skb_fill_rx_data(struct be_adapter *adapter, } /* More frags present for this completion */ - pktsize -= curr_frag_len; /* account for above copied frag */ + size = pktsize; for (i = 1, j = 0; i < num_rcvd; i++) { + size -= curr_frag_len; index_inc(&rxq_idx, rxq->len); page_info = get_rx_page_info(adapter, rxq_idx); - curr_frag_len = min(pktsize, rx_frag_size); + curr_frag_len = min(size, rx_frag_size); /* Coalesce all frags from the same physical page in one slot */ if (page_info->page_offset == 0) { @@ -731,7 +732,6 @@ static void skb_fill_rx_data(struct be_adapter *adapter, skb_shinfo(skb)->frags[j].size += curr_frag_len; skb->len += curr_frag_len; skb->data_len += curr_frag_len; - pktsize -= curr_frag_len; memset(page_info, 0, sizeof(*page_info)); } diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index b70cc99..e7979a6 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -2836,7 +2836,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) if (unlikely(netif_tx_queue_stopped(txq)) && (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) { - __netif_tx_lock(txq, smp_processor_id()); + __netif_tx_lock(txq); if ((netif_tx_queue_stopped(txq)) && (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) netif_tx_wake_queue(txq); diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index c36a5f3..2922e00 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c @@ -926,7 +926,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp) /* TBD need a thresh? */ if (unlikely(netif_tx_queue_stopped(txq))) { - __netif_tx_lock(txq, smp_processor_id()); + __netif_tx_lock(txq); /* Need to make the tx_bd_cons update visible to start_xmit() * before checking for netif_tx_queue_stopped(). 
Without the diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 3711d64..e6e5abd 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1671,8 +1671,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, struct cmdQ *q = &sge->cmdQ[qid]; unsigned int credits, pidx, genbit, count, use_sched_skb = 0; - if (!spin_trylock(&q->lock)) - return NETDEV_TX_LOCKED; + spin_lock(&q->lock); reclaim_completed_tx(sge, q); diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 4d1515f..4869d77 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -227,7 +227,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, } rcu_read_lock(); - ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]); + ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); if (ulp_ops) ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len); rcu_read_unlock(); @@ -319,6 +319,20 @@ static int cnic_abort_prep(struct cnic_sock *csk) return 0; } +static void cnic_uio_stop(void) +{ + struct cnic_dev *dev; + + read_lock(&cnic_dev_lock); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + if (cp->cnic_uinfo) + cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); + } + read_unlock(&cnic_dev_lock); +} + int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) { struct cnic_dev *dev; @@ -390,6 +404,9 @@ int cnic_unregister_driver(int ulp_type) } read_unlock(&cnic_dev_lock); + if (ulp_type == CNIC_ULP_ISCSI) + cnic_uio_stop(); + rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); mutex_unlock(&cnic_lock); @@ -632,7 +649,6 @@ static void cnic_free_resc(struct cnic_dev *dev) int i = 0; if (cp->cnic_uinfo) { - cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); while (cp->uio_dev != -1 && i < 15) { msleep(100); i++; @@ -1057,6 +1073,9 @@ static void cnic_ulp_stop(struct cnic_dev *dev) struct cnic_local *cp = dev->cnic_priv; int if_type; + if (cp->cnic_uinfo) + cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); + rcu_read_lock(); for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { struct cnic_ulp_ops *ulp_ops; diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index cc2ab64..4f70034 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -1784,7 +1784,7 @@ int __init init_module(void) printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n"); } - for (i = 0; io[i] != -1 && i < MAX_EEPRO; i++) { + for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) { dev = alloc_etherdev(sizeof(struct eepro_local)); if (!dev) break; diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 48385c4..160655d 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c @@ -584,7 +584,8 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, if (np->flags == HAS_MII_XCVR) { int phy, phy_idx = 0; - for (phy = 1; phy < 32 && phy_idx < 4; phy++) { + for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys); + phy++) { int mii_status = mdio_read(dev, phy, 1); if (mii_status != 0xffff && mii_status != 0x0000) { diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index b892c3a..2bc2d2b 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -754,17 +754,16 @@ static int fs_init_phy(struct net_device *dev) fep->oldlink = 0; fep->oldspeed = 0; fep->oldduplex = -1; - if(fep->fpi->phy_node) - phydev = of_phy_connect(dev, fep->fpi->phy_node, - &fs_adjust_link, 0, - PHY_INTERFACE_MODE_MII); - else { - printk("No phy bus ID specified in BSP 
code\n"); - return -EINVAL; + + phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + if (!phydev) { + phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link, + PHY_INTERFACE_MODE_MII); } - if (IS_ERR(phydev)) { - printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); - return PTR_ERR(phydev); + if (!phydev) { + dev_err(&dev->dev, "Could not attach to PHY\n"); + return -ENODEV; } fep->phydev = phydev; @@ -1005,6 +1004,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev, goto out_free_fpi; } + SET_NETDEV_DEV(ndev, &ofdev->dev); dev_set_drvdata(&ofdev->dev, ndev); fep = netdev_priv(ndev); diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 43d813e..f8ffcbf 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -264,15 +264,6 @@ static int gfar_of_init(struct net_device *dev) priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; priv->phy_node = of_parse_phandle(np, "phy-handle", 0); - if (!priv->phy_node) { - u32 *fixed_link; - - fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL); - if (!fixed_link) { - err = -ENODEV; - goto err_out; - } - } /* Find the TBI PHY. If it's not there, we don't support SGMII */ priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); @@ -659,13 +650,14 @@ static int init_phy(struct net_device *dev) interface = gfar_get_interface(dev); - if (priv->phy_node) { - priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, - 0, interface); - if (!priv->phydev) { - dev_err(&dev->dev, "error: Could not attach to PHY\n"); - return -ENODEV; - } + priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, + interface); + if (!priv->phydev) + priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, + interface); + if (!priv->phydev) { + dev_err(&dev->dev, "could not attach to PHY\n"); + return -ENODEV; } if (interface == PHY_INTERFACE_MODE_SGMII) diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 981ab53..fcc67e0 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -120,7 +120,7 @@ struct sixpack { struct timer_list tx_t; struct timer_list resync_t; atomic_t refcnt; - struct semaphore dead_sem; + struct anon_semaphore dead_sem; spinlock_t lock; }; @@ -412,7 +412,7 @@ static struct sixpack *sp_get(struct tty_struct *tty) static void sp_put(struct sixpack *sp) { if (atomic_dec_and_test(&sp->refcnt)) - up(&sp->dead_sem); + anon_up(&sp->dead_sem); } /* @@ -606,7 +606,7 @@ static int sixpack_open(struct tty_struct *tty) spin_lock_init(&sp->lock); atomic_set(&sp->refcnt, 1); - init_MUTEX_LOCKED(&sp->dead_sem); + anon_semaphore_init_locked(&sp->dead_sem); /* !!! length of the buffers. MTU is IP MTU, not PACLEN! */ @@ -702,7 +702,7 @@ static void sixpack_close(struct tty_struct *tty) * we have to wait for all existing users to finish. 
*/ if (!atomic_dec_and_test(&sp->refcnt)) - down(&sp->dead_sem); + anon_down(&sp->dead_sem); unregister_netdev(sp->dev); diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index fda2fc8..dd897b0 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -82,7 +82,7 @@ struct mkiss { #define CRC_MODE_SMACK_TEST 4 atomic_t refcnt; - struct semaphore dead_sem; + struct anon_semaphore dead_sem; }; /*---------------------------------------------------------------------------*/ @@ -718,7 +718,7 @@ static struct mkiss *mkiss_get(struct tty_struct *tty) static void mkiss_put(struct mkiss *ax) { if (atomic_dec_and_test(&ax->refcnt)) - up(&ax->dead_sem); + anon_up(&ax->dead_sem); } static int crc_force = 0; /* Can be overridden with insmod */ @@ -745,7 +745,7 @@ static int mkiss_open(struct tty_struct *tty) spin_lock_init(&ax->buflock); atomic_set(&ax->refcnt, 1); - init_MUTEX_LOCKED(&ax->dead_sem); + anon_semaphore_init_locked(&ax->dead_sem); ax->tty = tty; tty->disc_data = ax; @@ -824,7 +824,7 @@ static void mkiss_close(struct tty_struct *tty) * we have to wait for all existing users to finish. */ if (!atomic_dec_and_test(&ax->refcnt)) - down(&ax->dead_sem); + anon_down(&ax->dead_sem); unregister_netdev(ax->dev); diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index fd0796c..aa04bbd 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -908,7 +908,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n dev->tx_skb = NULL; spin_lock_init(&dev->tx_lock); - init_MUTEX(&dev->fsm.sem); + semaphore_init(&dev->fsm.sem); dev->drv = drv; dev->netdev = ndev; diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index cd22323..1b12c7b 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -327,6 +327,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) #define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26) #define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27) +#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28) #define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) u32 flags2; diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index da2c851..1c72657 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -139,6 +139,18 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; } adapter->flags |= IXGBE_FLAG_DCB_ENABLED; +#ifdef IXGBE_FCOE + /* Turn on FCoE offload */ + if ((adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) && + (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))) { + adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = + IXGBE_FCRETA_SIZE; + netdev->features |= NETIF_F_FCOE_CRC; + netdev->features |= NETIF_F_FSO; + netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + } +#endif /* IXGBE_FCOE */ ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); @@ -156,6 +168,18 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) adapter->flags |= IXGBE_FLAG_RSS_ENABLED; if (adapter->hw.mac.type == ixgbe_mac_82599EB) adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#ifdef IXGBE_FCOE + /* Turn off FCoE offload */ + if (adapter->flags & (IXGBE_FLAG_FCOE_CAPABLE | + IXGBE_FLAG_FCOE_ENABLED)) { + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = 0; + netdev->features &= ~NETIF_F_FCOE_CRC; + 
netdev->features &= ~NETIF_F_FSO; + netdev->fcoe_ddp_xid = 0; + } +#endif /* IXGBE_FCOE */ ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e3442f4..200454f 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -510,8 +511,11 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, * @skb: skb currently being received and modified **/ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, - u32 status_err, struct sk_buff *skb) + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { + u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error); + skb->ip_summed = CHECKSUM_NONE; /* Rx csum disabled */ @@ -529,6 +533,16 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, return; if (status_err & IXGBE_RXDADV_ERR_TCPE) { + u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + + /* + * 82599 errata, UDP frames with a 0 checksum can be marked as + * checksum errors. + */ + if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && + (adapter->hw.mac.type == ixgbe_mac_82599EB)) + return; + adapter->hw_csum_rx_error++; return; } @@ -802,7 +816,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, goto next_desc; } - ixgbe_rx_checksum(adapter, staterr, skb); + ixgbe_rx_checksum(adapter, rx_desc, skb); /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -3806,8 +3820,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->atr_sample_rate = 20; adapter->fdir_pballoc = 0; #ifdef IXGBE_FCOE - adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; + adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = 0; #endif /* IXGBE_FCOE */ } @@ -5125,9 +5140,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) int count = 0; unsigned int f; - r_idx = skb->queue_mapping; - tx_ring = &adapter->tx_ring[r_idx]; - if (adapter->vlgrp && vlan_tx_tag_present(skb)) { tx_flags |= vlan_tx_tag_get(skb); if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { @@ -5137,11 +5149,19 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_VLAN; } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - tx_flags |= (skb->queue_mapping << 13); - tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IXGBE_TX_FLAGS_VLAN; + if (skb->priority != TC_PRIO_CONTROL) { + tx_flags |= (skb->queue_mapping << 13); + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_VLAN; + } else { + skb->queue_mapping = + adapter->ring_feature[RING_F_DCB].indices-1; + } } + r_idx = skb->queue_mapping; + tx_ring = &adapter->tx_ring[r_idx]; + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && (skb->protocol == htons(ETH_P_FCOE))) tx_flags |= IXGBE_TX_FLAGS_FCOE; @@ -5580,16 +5600,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, #endif #ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { if (hw->mac.ops.get_device_caps) { hw->mac.ops.get_device_caps(hw, &device_caps); - if (!(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)) { - netdev->features |= NETIF_F_FCOE_CRC; - netdev->features 
|= NETIF_F_FSO; - netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; - } else { - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - } + if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; } } #endif /* IXGBE_FCOE */ @@ -5638,7 +5653,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, adapter->wol = 0; break; } - device_init_wakeup(&adapter->pdev->dev, true); device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); /* pick up the PCI bus settings for reporting later */ diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index da472c6..15c6599 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -65,6 +65,14 @@ struct pcpu_lstats { unsigned long drops; }; +#ifdef CONFIG_PREEMPT_RT +# define xmit_get_cpu() get_cpu() +# define xmit_put_cpu() put_cpu() +#else +# define xmit_get_cpu() smp_processor_id() +# define xmit_put_cpu() do { } while (0) +#endif + /* * The higher levels take care of making this non-reentrant (it's * called with bh's disabled). @@ -72,22 +80,23 @@ struct pcpu_lstats { static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) { struct pcpu_lstats *pcpu_lstats, *lb_stats; - int len; + int len, res; skb_orphan(skb); skb->protocol = eth_type_trans(skb, dev); + len = skb->len; + res = netif_rx_ni(skb); - /* it's OK to use per_cpu_ptr() because BHs are off */ pcpu_lstats = dev->ml_priv; - lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id()); + lb_stats = per_cpu_ptr(pcpu_lstats, xmit_get_cpu()); - len = skb->len; - if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { + if (likely(res == NET_RX_SUCCESS)) { lb_stats->bytes += len; lb_stats->packets++; } else lb_stats->drops++; + xmit_put_cpu(); return 0; } diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 5bd79c2..a964488 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h @@ -40,6 +40,7 @@ #include #include #include +#include #include #include diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 0f32db3..ee2c56d 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -508,7 +508,7 @@ static void txq_maybe_wake(struct tx_queue *txq) struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); if (netif_tx_queue_stopped(nq)) { - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock(nq); if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) netif_tx_wake_queue(nq); __netif_tx_unlock(nq); @@ -899,7 +899,7 @@ static void txq_kick(struct tx_queue *txq) u32 hw_desc_ptr; u32 expected_ptr; - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock(nq); if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) goto out; @@ -923,7 +923,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); int reclaimed; - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock(nq); reclaimed = 0; while (reclaimed < budget && txq->tx_desc_count > 0) { diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 5d3343e..f12abcc 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -184,6 +184,13 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter) kfree(recv_ctx->rds_rings); skip_rds: + if (recv_ctx->sds_rings == NULL) + goto skip_sds; + + for(ring = 0; ring < adapter->max_sds_rings; ring++) + recv_ctx->sds_rings[ring].consumer = 0; + +skip_sds: if (adapter->tx_ring == NULL) return; @@ -1401,7 +1408,7 @@ int 
netxen_process_cmd_ring(struct netxen_adapter *adapter) smp_mb(); if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { - __netif_tx_lock(tx_ring->txq, smp_processor_id()); + __netif_tx_lock(tx_ring->txq); if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) netif_wake_queue(netdev); __netif_tx_unlock(tx_ring->txq); diff --git a/drivers/net/niu.c b/drivers/net/niu.c index d2146d4..4b6d8ce 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -3681,7 +3681,7 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) out: if (unlikely(netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { - __netif_tx_lock(txq, smp_processor_id()); + __netif_tx_lock(txq); if (netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) netif_tx_wake_queue(txq); diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 33984b7..22cdd45 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -30,6 +30,7 @@ #ifdef CONFIG_OF_GPIO #include +#include #include #endif @@ -81,13 +82,12 @@ static struct mdiobb_ops mdio_gpio_ops = { .get_mdio_data = mdio_get, }; -static int __devinit mdio_gpio_bus_init(struct device *dev, +static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev, struct mdio_gpio_platform_data *pdata, int bus_id) { struct mii_bus *new_bus; struct mdio_gpio_info *bitbang; - int ret = -ENOMEM; int i; bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL); @@ -104,8 +104,6 @@ static int __devinit mdio_gpio_bus_init(struct device *dev, new_bus->name = "GPIO Bitbanged MDIO", - ret = -ENODEV; - new_bus->phy_mask = pdata->phy_mask; new_bus->irq = pdata->irqs; new_bus->parent = dev; @@ -129,15 +127,8 @@ static int __devinit mdio_gpio_bus_init(struct device *dev, dev_set_drvdata(dev, new_bus); - ret = mdiobus_register(new_bus); - if (ret) - goto out_free_all; - - return 0; + return new_bus; -out_free_all: - dev_set_drvdata(dev, NULL); - gpio_free(bitbang->mdio); out_free_mdc: gpio_free(bitbang->mdc); out_free_bus: @@ -145,30 +136,47 @@ out_free_bus: out_free_bitbang: kfree(bitbang); out: - return ret; + return NULL; } -static void __devexit mdio_gpio_bus_destroy(struct device *dev) +static void __devinit mdio_gpio_bus_deinit(struct device *dev) { struct mii_bus *bus = dev_get_drvdata(dev); struct mdio_gpio_info *bitbang = bus->priv; - mdiobus_unregister(bus); - free_mdio_bitbang(bus); dev_set_drvdata(dev, NULL); - gpio_free(bitbang->mdc); gpio_free(bitbang->mdio); + gpio_free(bitbang->mdc); + free_mdio_bitbang(bus); kfree(bitbang); } +static void __devexit mdio_gpio_bus_destroy(struct device *dev) +{ + struct mii_bus *bus = dev_get_drvdata(dev); + + mdiobus_unregister(bus); + mdio_gpio_bus_deinit(dev); +} + static int __devinit mdio_gpio_probe(struct platform_device *pdev) { struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data; + struct mii_bus *new_bus; + int ret; if (!pdata) return -ENODEV; - return mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); + new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); + if (!new_bus) + return -ENODEV; + + ret = mdiobus_register(new_bus); + if (ret) + mdio_gpio_bus_deinit(&pdev->dev); + + return ret; } static int __devexit mdio_gpio_remove(struct platform_device *pdev) @@ -179,29 +187,12 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev) } #ifdef CONFIG_OF_GPIO -static void __devinit add_phy(struct mdio_gpio_platform_data *pdata, - struct device_node *np) -{ - const u32 *data; - int len, id, irq; - - data = of_get_property(np, "reg", 
&len); - if (!data || len != 4) - return; - - id = *data; - pdata->phy_mask &= ~(1 << id); - - irq = of_irq_to_resource(np, 0, NULL); - if (irq) - pdata->irqs[id] = irq; -} static int __devinit mdio_ofgpio_probe(struct of_device *ofdev, const struct of_device_id *match) { - struct device_node *np = NULL; struct mdio_gpio_platform_data *pdata; + struct mii_bus *new_bus; int ret; pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); @@ -215,14 +206,18 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev, ret = of_get_gpio(ofdev->node, 1); if (ret < 0) - goto out_free; + goto out_free; pdata->mdio = ret; - while ((np = of_get_next_child(ofdev->node, np))) - if (!strcmp(np->type, "ethernet-phy")) - add_phy(pdata, np); + new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc); + if (!new_bus) + return -ENODEV; - return mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc); + ret = of_mdiobus_register(new_bus, ofdev->node); + if (ret) + mdio_gpio_bus_deinit(&ofdev->dev); + + return ret; out_free: kfree(pdata); diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 6de8399..1a29a1c 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c @@ -67,7 +67,7 @@ struct asyncppp { struct tasklet_struct tsk; atomic_t refcnt; - struct semaphore dead_sem; + struct anon_semaphore dead_sem; struct ppp_channel chan; /* interface to generic ppp layer */ unsigned char obuf[OBUFSIZE]; }; @@ -145,7 +145,7 @@ static struct asyncppp *ap_get(struct tty_struct *tty) static void ap_put(struct asyncppp *ap) { if (atomic_dec_and_test(&ap->refcnt)) - up(&ap->dead_sem); + anon_up(&ap->dead_sem); } /* @@ -183,7 +183,7 @@ ppp_asynctty_open(struct tty_struct *tty) tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap); atomic_set(&ap->refcnt, 1); - init_MUTEX_LOCKED(&ap->dead_sem); + anon_semaphore_init_locked(&ap->dead_sem); ap->chan.private = ap; ap->chan.ops = &async_ops; @@ -232,7 +232,7 @@ ppp_asynctty_close(struct tty_struct *tty) * by the time it returns. */ if (!atomic_dec_and_test(&ap->refcnt)) - down(&ap->dead_sem); + anon_down(&ap->dead_sem); tasklet_kill(&ap->tsk); ppp_unregister_channel(&ap->chan); diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 4b53b58..b82780d 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -2060,8 +2060,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } } - pci_set_master(pdev); - /* ioremap MMIO region */ ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); if (!ioaddr) { @@ -2089,6 +2087,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) RTL_W16(IntrStatus, 0xffff); + pci_set_master(pdev); + /* Identify chip attached to board */ rtl8169_get_mac_version(tp, ioaddr); @@ -3874,6 +3874,15 @@ static void rtl_shutdown(struct pci_dev *pdev) spin_unlock_irq(&tp->lock); if (system_state == SYSTEM_POWER_OFF) { + /* WoL fails with some 8168 when the receiver is disabled. 
*/ + if (tp->features & RTL_FEATURE_WOL) { + pci_clear_master(pdev); + + RTL_W8(ChipCmd, CmdRxEnb); + /* PCI commit */ + RTL_R8(ChipCmd); + } + pci_wake_from_d3(pdev, true); pci_set_power_state(pdev, PCI_D3hot); } diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 8702e7a..f4703ac 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -180,11 +180,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) u16 destid; unsigned long flags; - local_irq_save(flags); - if (!spin_trylock(&rnet->tx_lock)) { - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } + spin_lock_irqsave(&rnet->tx_lock, flags); if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) { netif_stop_queue(ndev); diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 458daa0..60913bf 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -4161,12 +4161,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) [skb->priority & (MAX_TX_FIFOS - 1)]; fifo = &mac_control->fifos[queue]; - if (do_spin_lock) - spin_lock_irqsave(&fifo->tx_lock, flags); - else { - if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) - return NETDEV_TX_LOCKED; - } + spin_lock_irqsave(&fifo->tx_lock, flags); if (sp->config.multiq) { if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 60d502e..543af20 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -3854,8 +3854,10 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, skge->speed = -1; skge->advertising = skge_supported_modes(hw); - if (device_may_wakeup(&hw->pdev->dev)) + if (device_can_wakeup(&hw->pdev->dev)) { skge->wol = wol_supported(hw) & WAKE_MAGIC; + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + } hw->dev[port] = dev; diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index f1f773b..57a159f 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -186,7 +186,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) #define SMC_IRQ_FLAGS (-1) /* from resource */ -#elif defined(CONFIG_MACH_LOGICPD_PXA270) +#elif defined(CONFIG_MACH_LOGICPD_PXA270) \ + || defined(CONFIG_MACH_NOMADIK_8815NHK) #define SMC_CAN_USE_8BIT 0 #define SMC_CAN_USE_16BIT 1 diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index d2dfe0a..d47ce6c 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -1032,12 +1032,8 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) (csum_stuff_off << 21)); } - local_irq_save(flags); - if (!spin_trylock(&gp->tx_lock)) { - /* Tell upper layer to requeue */ - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } + spin_lock_irqsave(&gp->tx_lock, flags); + /* We raced with gem_do_stop() */ if (!gp->running) { spin_unlock_irqrestore(&gp->tx_lock, flags); diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c index 3c2679c..ce65846 100644 --- a/drivers/net/tehuti.c +++ b/drivers/net/tehuti.c @@ -1638,13 +1638,8 @@ static int bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev) unsigned long flags; ENTER; - local_irq_save(flags); - if (!spin_trylock(&priv->tx_lock)) { - local_irq_restore(flags); - DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", - BDX_DRV_NAME, ndev->name); - return NETDEV_TX_LOCKED; - } + + spin_lock_irqsave(&priv->tx_lock, flags); /* build tx descriptor */ BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ diff --git a/drivers/net/tokenring/ibmtr.c 
b/drivers/net/tokenring/ibmtr.c index 9d89611..08a6c41 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -1912,7 +1912,7 @@ static int __init ibmtr_init(void) find_turbo_adapters(io); - for (i = 0; io[i] && (i < IBMTR_MAX_ADAPTERS); i++) { + for (i = 0; i < IBMTR_MAX_ADAPTERS && io[i]; i++) { struct net_device *dev; irq[i] = 0; mem[i] = 0; diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 99a6364..5889238 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c @@ -1816,6 +1816,7 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev) pci_iounmap(pdev, tp->base_addr); free_netdev (dev); pci_release_regions (pdev); + pci_disable_device (pdev); pci_set_drvdata (pdev, NULL); /* pci_power_off (pdev, -1); */ diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 40c6eba..3b957e6 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -1590,13 +1590,13 @@ static int init_phy(struct net_device *dev) priv->oldspeed = 0; priv->oldduplex = -1; - if (!ug_info->phy_node) - return 0; - phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, priv->phy_interface); + if (!phydev) + phydev = of_phy_connect_fixed_link(dev, &adjust_link, + priv->phy_interface); if (!phydev) { - printk("%s: Could not attach to PHY\n", dev->name); + dev_err(&dev->dev, "Could not attach to PHY\n"); return -ENODEV; } @@ -3608,9 +3608,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma struct ucc_geth_private *ugeth = NULL; struct ucc_geth_info *ug_info; struct resource res; - struct device_node *phy; int err, ucc_num, max_speed = 0; - const u32 *fixed_link; const unsigned int *prop; const char *sprop; const void *mac_addr; @@ -3708,15 +3706,8 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma ug_info->uf_info.regs = res.start; ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); - fixed_link = of_get_property(np, "fixed-link", NULL); - if (fixed_link) { - phy = NULL; - } else { - phy = of_parse_phandle(np, "phy-handle", 0); - if (phy == NULL) - return -ENODEV; - } - ug_info->phy_node = phy; + + ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); /* Find the TBI PHY node. 
If it's not there, we don't support SGMII */ ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); @@ -3725,7 +3716,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma prop = of_get_property(np, "phy-connection-type", NULL); if (!prop) { /* handle interface property present in old trees */ - prop = of_get_property(phy, "interface", NULL); + prop = of_get_property(ug_info->phy_node, "interface", NULL); if (prop != NULL) { phy_interface = enet_to_phy_interface[*prop]; max_speed = enet_to_speed[*prop]; diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 61581ee..40d64ce 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -574,7 +574,7 @@ static int cosa_probe(int base, int irq, int dma) /* Initialize the chardev data structures */ mutex_init(&chan->rlock); - init_MUTEX(&chan->wsem); + semaphore_init(&chan->wsem); /* Register the network interface */ if (!(chan->netdev = alloc_hdlcdev(chan))) { diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index ea04515..029c1bc 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -2970,6 +2970,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (modparam_nohwcrypt) return -EOPNOTSUPP; + if (sc->opmode == NL80211_IFTYPE_AP) + return -EOPNOTSUPP; + switch (key->alg) { case ALG_WEP: case ALG_TKIP: diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index 1aeafb5..aad259b 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -478,6 +478,18 @@ void ath9k_ani_reset(struct ath_hw *ah) "Reset ANI state opmode %u\n", ah->opmode); ah->stats.ast_ani_reset++; + if (ah->opmode == NL80211_IFTYPE_AP) { + /* + * ath9k_hw_ani_control() will only process items set on + * ah->ani_function + */ + if (IS_CHAN_2GHZ(chan)) + ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL | + ATH9K_ANI_FIRSTEP_LEVEL); + else + ah->ani_function = 0; + } + ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0); ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0); ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0); diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index eef370b..bf3d25b 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -474,6 +474,21 @@ ath_regd_init_wiphy(struct ath_regulatory *reg, return 0; } +/* + * Some users have reported their EEPROM programmed with + * 0x8000 set, this is not a supported regulatory domain + * but since we have more than one user with it we need + * a solution for them. We default to 0x64, which is the + * default Atheros world regulatory domain. 
+ */ +static void ath_regd_sanitize(struct ath_regulatory *reg) +{ + if (reg->current_rd != COUNTRY_ERD_FLAG) + return; + printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n"); + reg->current_rd = 0x64; +} + int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy, @@ -486,6 +501,8 @@ ath_regd_init(struct ath_regulatory *reg, if (!reg) return -EINVAL; + ath_regd_sanitize(reg); + printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd); if (!ath_regd_is_eeprom_valid(reg)) { diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 6d1519e..355f50e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -2675,12 +2675,10 @@ static ssize_t show_power_level(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); - int mode = priv->power_data.user_power_setting; int level = priv->power_data.power_mode; char *p = buf; - p += sprintf(p, "INDEX:%d\t", level); - p += sprintf(p, "USER:%d\n", mode); + p += sprintf(p, "%d\n", level); return p - buf + 1; } diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 85ae7a6..9bbeec9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -872,7 +872,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); /* Set up entry for this TFD in Tx byte-count array */ - priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, + if (info->flags & IEEE80211_TX_CTL_AMPDU) + priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, le16_to_cpu(tx_cmd->len)); pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index cb9bd4c..956798f 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -3643,12 +3643,10 @@ static ssize_t show_power_level(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); - int mode = priv->power_data.user_power_setting; int level = priv->power_data.power_mode; char *p = buf; - p += sprintf(p, "INDEX:%d\t", level); - p += sprintf(p, "USER:%d\n", mode); + p += sprintf(p, "%d\n", level); return p - buf + 1; } diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c index aaa20c6..aea5ccf 100644 --- a/drivers/net/wireless/iwmc3200wifi/netdev.c +++ b/drivers/net/wireless/iwmc3200wifi/netdev.c @@ -151,8 +151,8 @@ void iwm_if_free(struct iwm_priv *iwm) return; free_netdev(iwm_to_ndev(iwm)); - iwm_wdev_free(iwm); iwm_priv_deinit(iwm); + iwm_wdev_free(iwm); } int iwm_if_add(struct iwm_priv *iwm) diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 01db705..6850981 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -135,8 +135,14 @@ int lbs_update_hw_spec(struct lbs_private *priv) /* Clamp region code to 8-bit since FW spec indicates that it should * only ever be 8-bit, even though the field size is 16-bit. Some firmware * returns non-zero high 8 bits here. + * + * Firmware version 4.0.102 used in CF8381 has region code shifted. We + * need to check for this problem and handle it properly. 
*/ - priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF; + if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V4) + priv->regioncode = (le16_to_cpu(cmd.regioncode) >> 8) & 0xFF; + else + priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF; for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) { /* use the region code to search for the index */ diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h index 48da157..72f3479 100644 --- a/drivers/net/wireless/libertas/defs.h +++ b/drivers/net/wireless/libertas/defs.h @@ -234,6 +234,8 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in /** Mesh enable bit in FW capability */ #define MESH_CAPINFO_ENABLE_MASK (1<<16) +/** FW definition from Marvell v4 */ +#define MRVL_FW_V4 (0x04) /** FW definition from Marvell v5 */ #define MRVL_FW_V5 (0x05) /** FW definition from Marvell v10 */ diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index a111bda..7916ca3 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -709,7 +709,7 @@ static const struct ieee80211_ops mac80211_hwsim_ops = static void mac80211_hwsim_free(void) { struct list_head tmplist, *i, *tmp; - struct mac80211_hwsim_data *data; + struct mac80211_hwsim_data *data, *tmpdata; INIT_LIST_HEAD(&tmplist); @@ -718,7 +718,7 @@ static void mac80211_hwsim_free(void) list_move(i, &tmplist); spin_unlock_bh(&hwsim_radio_lock); - list_for_each_entry(data, &tmplist, list) { + list_for_each_entry_safe(data, tmpdata, &tmplist, list) { debugfs_remove(data->debugfs_group); debugfs_remove(data->debugfs_ps); debugfs_remove(data->debugfs); @@ -1167,8 +1167,8 @@ static void __exit exit_mac80211_hwsim(void) { printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n"); - unregister_netdev(hwsim_mon); mac80211_hwsim_free(); + unregister_netdev(hwsim_mon); } diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index 83116ba..72c7dbd 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c @@ -635,7 +635,7 @@ static int __devinit p54spi_probe(struct spi_device *spi) hw = p54_init_common(sizeof(*priv)); if (!hw) { - dev_err(&priv->spi->dev, "could not alloc ieee80211_hw"); + dev_err(&spi->dev, "could not alloc ieee80211_hw"); return -ENOMEM; } diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index 66daf68..ce75426 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -1550,7 +1550,9 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2500usb_register_read(rt2x00dev, MAC_CSR0, ®); rt2x00_set_chip(rt2x00dev, RT2570, value, reg); - if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0)) { + if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) || + rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) { + ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); return -ENODEV; } diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c index b442535..cf9f899 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_leds.c +++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c @@ -208,11 +208,12 @@ void rtl8187_leds_exit(struct ieee80211_hw *dev) { struct rtl8187_priv *priv = dev->priv; - rtl8187_unregister_led(&priv->led_tx); /* turn the LED off before exiting */ queue_delayed_work(dev->workqueue, &priv->led_off, 0); cancel_delayed_work_sync(&priv->led_off); + 
cancel_delayed_work_sync(&priv->led_on); rtl8187_unregister_led(&priv->led_rx); + rtl8187_unregister_led(&priv->led_tx); } #endif /* def CONFIG_RTL8187_LED */ diff --git a/drivers/of/base.c b/drivers/of/base.c index 69f85c0..26f4a62 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -25,7 +25,7 @@ struct device_node *allnodes; /* use when traversing tree through the allnext, child, sibling, * or parent members of struct device_node. */ -DEFINE_RWLOCK(devtree_lock); +DEFINE_ATOMIC_SPINLOCK(devtree_lock); int of_n_addr_cells(struct device_node *np) { @@ -68,7 +68,7 @@ struct property *of_find_property(const struct device_node *np, if (!np) return NULL; - read_lock(&devtree_lock); + atomic_spin_lock(&devtree_lock); for (pp = np->properties; pp != 0; pp = pp->next) { if (of_prop_cmp(pp->name, name) == 0) { if (lenp != 0) @@ -76,7 +76,7 @@ struct property *of_find_property(const struct device_node *np, break; } } - read_unlock(&devtree_lock); + atomic_spin_unlock(&devtree_lock); return pp; } @@ -159,9 +159,9 @@ struct device_node *of_get_parent(const struct device_node *node) if (!node) return NULL; - read_lock(&devtree_lock); + atomic_spin_lock(&devtree_lock); np = of_node_get(node->parent); - read_unlock(&devtree_lock); + atomic_spin_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_get_parent); @@ -184,10 +184,10 @@ struct device_node *of_get_next_parent(struct device_node *node) if (!node) return NULL; - read_lock(&devtree_lock); + atomic_spin_lock(&devtree_lock); parent = of_node_get(node->parent); of_node_put(node); - read_unlock(&devtree_lock); + atomic_spin_unlock(&devtree_lock); return parent; } @@ -204,13 +204,13 @@ struct device_node *of_get_next_child(const struct device_node *node, { struct device_node *next; - read_lock(&devtree_lock); + atomic_spin_lock(&devtree_lock); next = prev ? prev->sibling : node->child; for (; next; next = next->sibling) if (of_node_get(next)) break; of_node_put(prev); - read_unlock(&devtree_lock); + atomic_spin_unlock(&devtree_lock); return next; } EXPORT_SYMBOL(of_get_next_child); @@ -226,13 +226,13 @@ struct device_node *of_find_node_by_path(const char *path) { struct device_node *np = allnodes; - read_lock(&devtree_lock); + atomic_spin_lock(&devtree_lock); for (; np; np = np->allnext) { if (np->full_name && (of_node_cmp(np->full_name, path) == 0) && of_node_get(np)) break; } - read_unlock(&devtree_lock); + atomic_spin_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_path); @@ -253,14 +253,14 @@ struct device_node *of_find_node_by_name(struct device_node *from, { struct device_node *np; - read_lock(&devtree_lock); + atomic_spin_lock(&devtree_lock); np = from ? from->allnext : allnodes; for (; np; np = np->allnext) if (np->name && (of_node_cmp(np->name, name) == 0) && of_node_get(np)) break; of_node_put(from); - read_unlock(&devtree_lock); + atomic_spin_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_name); @@ -282,14 +282,14 @@ struct device_node *of_find_node_by_type(struct device_node *from, { struct device_node *np; - read_lock(&devtree_lock); + atomic_spin_lock(&devtree_lock); np = from ? 
from->allnext : allnodes; for (; np; np = np->allnext) if (np->type && (of_node_cmp(np->type, type) == 0) && of_node_get(np)) break; of_node_put(from); - read_unlock(&devtree_lock); + atomic_spin_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_type); @@ -313,7 +313,7 @@ struct device_node *of_find_compatible_node(struct device_node *from, { struct device_node *np; - read_lock(&devtree_lock); + atomic_spin_lock(&devtree_lock); np = from ? from->allnext : allnodes; for (; np; np = np->allnext) { if (type @@ -323,7 +323,7 @@ struct device_node *of_find_compatible_node(struct device_node *from, break; } of_node_put(from); - read_unlock(&devtree_lock); + atomic_spin_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_compatible_node); @@ -346,7 +346,7 @@ struct device_node *of_find_node_with_property(struct device_node *from, struct device_node *np; struct property *pp; - read_lock(&devtree_lock); + atomic_spin_lock(&devtree_lock); np = from ? from->allnext : allnodes; for (; np; np = np->allnext) { for (pp = np->properties; pp != 0; pp = pp->next) { @@ -358,7 +358,7 @@ struct device_node *of_find_node_with_property(struct device_node *from, } out: of_node_put(from); - read_unlock(&devtree_lock); + atomic_spin_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_with_property); @@ -409,14 +409,14 @@ struct device_node *of_find_matching_node(struct device_node *from, { struct device_node *np; - read_lock(&devtree_lock); + atomic_spin_lock(&devtree_lock); np = from ? from->allnext : allnodes; for (; np; np = np->allnext) { if (of_match_node(matches, np) && of_node_get(np)) break; } of_node_put(from); - read_unlock(&devtree_lock); + atomic_spin_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_matching_node); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index aee967d..bacaa53 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -9,6 +9,10 @@ * out of the OpenFirmware device tree and using it to populate an mii_bus. */ +#include +#include +#include +#include #include #include #include @@ -137,3 +141,41 @@ struct phy_device *of_phy_connect(struct net_device *dev, return phy_connect_direct(dev, phy, hndlr, flags, iface) ? NULL : phy; } EXPORT_SYMBOL(of_phy_connect); + +/** + * of_phy_connect_fixed_link - Parse fixed-link property and return a dummy phy + * @dev: pointer to net_device claiming the phy + * @hndlr: Link state callback for the network device + * @iface: PHY data interface type + * + * This function is a temporary stop-gap and will be removed soon. It is + * only to support the fs_enet, ucc_geth and gianfar Ethernet drivers. Do + * not call this function from new drivers. + */ +struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, + void (*hndlr)(struct net_device *), + phy_interface_t iface) +{ + struct device_node *net_np; + char bus_id[MII_BUS_ID_SIZE + 3]; + struct phy_device *phy; + const u32 *phy_id; + int sz; + + if (!dev->dev.parent) + return NULL; + + net_np = dev_archdata_get_node(&dev->dev.parent->archdata); + if (!net_np) + return NULL; + + phy_id = of_get_property(net_np, "fixed-link", &sz); + if (!phy_id || sz < sizeof(*phy_id)) + return NULL; + + sprintf(bus_id, PHY_ID_FMT, "0", phy_id[0]); + + phy = phy_connect(dev, bus_id, hndlr, 0, iface); + return IS_ERR(phy) ? 
NULL : phy; +} +EXPORT_SYMBOL(of_phy_connect_fixed_link); diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c index 2b7ae36..284814b 100644 --- a/drivers/oprofile/event_buffer.c +++ b/drivers/oprofile/event_buffer.c @@ -72,10 +72,10 @@ int alloc_event_buffer(void) int err = -ENOMEM; unsigned long flags; - spin_lock_irqsave(&oprofilefs_lock, flags); + atomic_spin_lock_irqsave(&oprofilefs_lock, flags); buffer_size = oprofile_buffer_size; buffer_watershed = oprofile_buffer_watershed; - spin_unlock_irqrestore(&oprofilefs_lock, flags); + atomic_spin_unlock_irqrestore(&oprofilefs_lock, flags); if (buffer_watershed >= buffer_size) return -EINVAL; diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c index b7e4cee..7cbf76d 100644 --- a/drivers/oprofile/oprofilefs.c +++ b/drivers/oprofile/oprofilefs.c @@ -21,7 +21,7 @@ #define OPROFILEFS_MAGIC 0x6f70726f -DEFINE_SPINLOCK(oprofilefs_lock); +DEFINE_ATOMIC_SPINLOCK(oprofilefs_lock); static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode) { @@ -75,9 +75,9 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_ if (copy_from_user(tmpbuf, buf, count)) return -EFAULT; - spin_lock_irqsave(&oprofilefs_lock, flags); + atomic_spin_lock_irqsave(&oprofilefs_lock, flags); *val = simple_strtoul(tmpbuf, NULL, 0); - spin_unlock_irqrestore(&oprofilefs_lock, flags); + atomic_spin_unlock_irqrestore(&oprofilefs_lock, flags); return 0; } diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c index 8901ecf..6e2f206 100644 --- a/drivers/parport/ieee1284.c +++ b/drivers/parport/ieee1284.c @@ -41,7 +41,7 @@ * It will be useful to call this from an interrupt handler. */ static void parport_ieee1284_wakeup (struct parport *port) { - up (&port->physport->ieee1284.irq); + anon_up (&port->physport->ieee1284.irq); } static struct parport *port_from_cookie[PARPORT_MAX]; @@ -83,7 +83,7 @@ int parport_wait_event (struct parport *port, signed long timeout) timer.data = port->number; add_timer (&timer); - ret = down_interruptible (&port->physport->ieee1284.irq); + ret = anon_down_interruptible (&port->physport->ieee1284.irq); if (!del_timer_sync(&timer) && !ret) /* Timed out. */ ret = 1; diff --git a/drivers/parport/share.c b/drivers/parport/share.c index dffa5d4..228942f 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -306,7 +306,7 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma, spin_lock_init(&tmp->pardevice_lock); tmp->ieee1284.mode = IEEE1284_MODE_COMPAT; tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE; - init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */ + anon_semaphore_init_locked(&tmp->ieee1284.irq); tmp->spintime = parport_default_spintime; atomic_set (&tmp->ref_count, 1); INIT_LIST_HEAD(&tmp->full_list); diff --git a/drivers/pci/access.c b/drivers/pci/access.c index db23200..fddeb63 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -12,7 +12,7 @@ * configuration space. */ -static DEFINE_SPINLOCK(pci_lock); +static DEFINE_ATOMIC_SPINLOCK(pci_lock); /* * Wrappers for all PCI configuration access functions. 
They just check @@ -32,10 +32,10 @@ int pci_bus_read_config_##size \ unsigned long flags; \ u32 data = 0; \ if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ - spin_lock_irqsave(&pci_lock, flags); \ + atomic_spin_lock_irqsave(&pci_lock, flags); \ res = bus->ops->read(bus, devfn, pos, len, &data); \ *value = (type)data; \ - spin_unlock_irqrestore(&pci_lock, flags); \ + atomic_spin_unlock_irqrestore(&pci_lock, flags); \ return res; \ } @@ -46,9 +46,9 @@ int pci_bus_write_config_##size \ int res; \ unsigned long flags; \ if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ - spin_lock_irqsave(&pci_lock, flags); \ + atomic_spin_lock_irqsave(&pci_lock, flags); \ res = bus->ops->write(bus, devfn, pos, len, value); \ - spin_unlock_irqrestore(&pci_lock, flags); \ + atomic_spin_unlock_irqrestore(&pci_lock, flags); \ return res; \ } @@ -78,10 +78,10 @@ struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops) struct pci_ops *old_ops; unsigned long flags; - spin_lock_irqsave(&pci_lock, flags); + atomic_spin_lock_irqsave(&pci_lock, flags); old_ops = bus->ops; bus->ops = ops; - spin_unlock_irqrestore(&pci_lock, flags); + atomic_spin_unlock_irqrestore(&pci_lock, flags); return old_ops; } EXPORT_SYMBOL(pci_bus_set_ops); @@ -135,9 +135,9 @@ static noinline void pci_wait_ucfg(struct pci_dev *dev) __add_wait_queue(&pci_ucfg_wait, &wait); do { set_current_state(TASK_UNINTERRUPTIBLE); - spin_unlock_irq(&pci_lock); + atomic_spin_unlock_irq(&pci_lock); schedule(); - spin_lock_irq(&pci_lock); + atomic_spin_lock_irq(&pci_lock); } while (dev->block_ucfg_access); __remove_wait_queue(&pci_ucfg_wait, &wait); } @@ -149,11 +149,11 @@ int pci_user_read_config_##size \ int ret = 0; \ u32 data = -1; \ if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ - spin_lock_irq(&pci_lock); \ + atomic_spin_lock_irq(&pci_lock); \ if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \ ret = dev->bus->ops->read(dev->bus, dev->devfn, \ pos, sizeof(type), &data); \ - spin_unlock_irq(&pci_lock); \ + atomic_spin_unlock_irq(&pci_lock); \ *val = (type)data; \ return ret; \ } @@ -164,11 +164,11 @@ int pci_user_write_config_##size \ { \ int ret = -EIO; \ if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ - spin_lock_irq(&pci_lock); \ + atomic_spin_lock_irq(&pci_lock); \ if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \ ret = dev->bus->ops->write(dev->bus, dev->devfn, \ pos, sizeof(type), val); \ - spin_unlock_irq(&pci_lock); \ + atomic_spin_unlock_irq(&pci_lock); \ return ret; \ } @@ -395,10 +395,10 @@ void pci_block_user_cfg_access(struct pci_dev *dev) unsigned long flags; int was_blocked; - spin_lock_irqsave(&pci_lock, flags); + atomic_spin_lock_irqsave(&pci_lock, flags); was_blocked = dev->block_ucfg_access; dev->block_ucfg_access = 1; - spin_unlock_irqrestore(&pci_lock, flags); + atomic_spin_unlock_irqrestore(&pci_lock, flags); /* If we BUG() inside the pci_lock, we're guaranteed to hose * the machine */ @@ -416,7 +416,7 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev) { unsigned long flags; - spin_lock_irqsave(&pci_lock, flags); + atomic_spin_lock_irqsave(&pci_lock, flags); /* This indicates a problem in the caller, but we don't need * to kill them, unlike a double-block above. 
*/ @@ -424,6 +424,6 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev) dev->block_ucfg_access = 0; wake_up_all(&pci_ucfg_wait); - spin_unlock_irqrestore(&pci_lock, flags); + atomic_spin_unlock_irqrestore(&pci_lock, flags); } EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access); diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index cef28a7..caa6295 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -240,9 +240,9 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), next = dev->bus_list.next; /* Run device routines with the device locked */ - down(&dev->dev.sem); + mutex_lock(&dev->dev.mutex); retval = cb(dev, userdata); - up(&dev->dev.sem); + mutex_unlock(&dev->dev.mutex); if (retval) break; } diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c index 83f337c..d120da6 100644 --- a/drivers/pci/hotplug/ibmphp_hpc.c +++ b/drivers/pci/hotplug/ibmphp_hpc.c @@ -104,7 +104,7 @@ static int to_debug = 0; static struct mutex sem_hpcaccess; // lock access to HPC static struct semaphore semOperations; // lock all operations and // access to data structures -static struct semaphore sem_exit; // make sure polling thread goes away +static struct anon_semaphore sem_exit; // make sure polling thread goes away static struct task_struct *ibmphp_poll_thread; //---------------------------------------------------------------------------- // local function prototypes @@ -132,8 +132,8 @@ void __init ibmphp_hpc_initvars (void) debug ("%s - Entry\n", __func__); mutex_init(&sem_hpcaccess); - init_MUTEX (&semOperations); - init_MUTEX_LOCKED (&sem_exit); + semaphore_init(&semOperations); + anon_semaphore_init_locked(&sem_exit); to_debug = 0; debug ("%s - Exit\n", __func__); @@ -906,7 +906,7 @@ static int poll_hpc(void *data) /* sleep for a short time just for good measure */ msleep(100); } - up (&sem_exit); + anon_up (&sem_exit); debug ("%s - Exit\n", __func__); return 0; } @@ -1076,7 +1076,7 @@ void __exit ibmphp_hpc_stop_poll_thread (void) // wait for poll thread to exit debug ("before sem_exit down \n"); - down (&sem_exit); + anon_down (&sem_exit); debug ("after sem_exit down \n"); // cleanup @@ -1085,7 +1085,7 @@ void __exit ibmphp_hpc_stop_poll_thread (void) debug ("after free_hpc_access \n"); ibmphp_unlock_operations (); debug ("after unlock operations \n"); - up (&sem_exit); + anon_up (&sem_exit); debug ("after sem exit up\n"); debug ("%s - Exit\n", __func__); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index dbd0f94..ef66a59 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -2211,7 +2211,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) if (!probe) { pci_block_user_cfg_access(dev); /* block PM suspend, driver probe, etc. 
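 (taking the device lock below keeps those paths out for the duration of the reset)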
*/ - down(&dev->dev.sem); + mutex_lock(&dev->dev.mutex); } rc = pcie_flr(dev, probe); @@ -2229,7 +2229,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) rc = pci_parent_bus_reset(dev, probe); done: if (!probe) { - up(&dev->dev.sem); + mutex_unlock(&dev->dev.mutex); pci_unblock_user_cfg_access(dev); } diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 304ff6d..a5bcb5c 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c @@ -1082,9 +1082,9 @@ static int runtime_suspend(struct device *dev) { int rc; - down(&dev->sem); + mutex_lock(&dev->mutex); rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND); - up(&dev->sem); + mutex_unlock(&dev->mutex); return rc; } @@ -1092,9 +1092,9 @@ static void runtime_resume(struct device *dev) { int rc; - down(&dev->sem); + mutex_lock(&dev->mutex); rc = pcmcia_dev_resume(dev); - up(&dev->sem); + mutex_unlock(&dev->mutex); } /************************ per-device sysfs output ***************************/ diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index be2fd6f..fb45f5e 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -973,7 +973,7 @@ static int acer_rfkill_set(void *data, bool blocked) { acpi_status status; u32 cap = (unsigned long)data; - status = set_u32(!!blocked, cap); + status = set_u32(!blocked, cap); if (ACPI_FAILURE(status)) return -ENODEV; return 0; diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c index d157665..dde3d10 100644 --- a/drivers/s390/cio/crw.c +++ b/drivers/s390/cio/crw.c @@ -137,7 +137,7 @@ void crw_handle_channel_report(void) */ static int __init crw_init_semaphore(void) { - init_MUTEX_LOCKED(&crw_semaphore); + semaphore_init_locked(&crw_semaphore); return 0; } pure_initcall(crw_init_semaphore); diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 727a809..ed3dcde 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1145,12 +1145,17 @@ ap_config_timeout(unsigned long ptr) */ static inline void ap_schedule_poll_timer(void) { + ktime_t hr_time; if (ap_using_interrupts() || ap_suspend_flag) return; if (hrtimer_is_queued(&ap_poll_timer)) return; - hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), - HRTIMER_MODE_ABS); + if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { + hr_time = ktime_set(0, poll_timeout); + hrtimer_forward_now(&ap_poll_timer, hr_time); + hrtimer_restart(&ap_poll_timer); + } + return; } /** diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index cdbdec9..5ccaa8d 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -719,7 +719,7 @@ struct aac_fib_context { u32 unique; // unique value representing this context ulong jiffies; // used for cleanup - dmb changed to ulong struct list_head next; // used to link context's into a linked list - struct semaphore wait_sem; // this is used to wait for the next fib to arrive. + struct anon_semaphore wait_sem; // this is used to wait for the next fib to arrive. int wait; // Set to true when thread is in WaitForSingleObject unsigned long count; // total number of FIBs on FibList struct list_head fib_list; // this holds fibs and their attachd hw_fibs @@ -789,7 +789,7 @@ struct fib { * This is the event the sendfib routine will wait on if the * caller did not pass one and this is synch io. 
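 * It is initialised locked in aac_fib_setup() and released from the
 * response/interrupt handlers, which is why a semaphore (anon_semaphore
 * in this tree) rather than a mutex is used here.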
*/ - struct semaphore event_wait; + struct anon_semaphore event_wait; spinlock_t event_lock; u32 done; /* gets set to 1 when fib is complete */ diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 0391d75..ab39bfc 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -190,7 +190,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg) /* * Initialize the mutex used to wait for the next AIF. */ - init_MUTEX_LOCKED(&fibctx->wait_sem); + anon_semaphore_init_locked(&fibctx->wait_sem); fibctx->wait = 0; /* * Initialize the fibs and set the count of fibs on @@ -321,7 +321,7 @@ return_fib: ssleep(1); } if (f.wait) { - if(down_interruptible(&fibctx->wait_sem) < 0) { + if(anon_down_interruptible(&fibctx->wait_sem) < 0) { status = -EINTR; } else { /* Lock again and retry */ diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 956261f..b5bda58 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -124,7 +124,7 @@ int aac_fib_setup(struct aac_dev * dev) fibptr->hw_fib_va = hw_fib; fibptr->data = (void *) fibptr->hw_fib_va->data; fibptr->next = fibptr+1; /* Forward chain the fibs */ - init_MUTEX_LOCKED(&fibptr->event_wait); + anon_semaphore_init_locked(&fibptr->event_wait); spin_lock_init(&fibptr->event_lock); hw_fib->header.XferState = cpu_to_le32(0xffffffff); hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); @@ -490,7 +490,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, * hardware failure has occurred. */ unsigned long count = 36000000L; /* 3 minutes */ - while (down_trylock(&fibptr->event_wait)) { + while (anon_down_trylock(&fibptr->event_wait)) { int blink; if (--count == 0) { struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; @@ -515,9 +515,9 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, } udelay(5); } - } else if (down_interruptible(&fibptr->event_wait)) { + } else if (anon_down_interruptible(&fibptr->event_wait)) { fibptr->done = 2; - up(&fibptr->event_wait); + anon_up(&fibptr->event_wait); } spin_lock_irqsave(&fibptr->event_lock, flags); if ((fibptr->done == 0) || (fibptr->done == 2)) { @@ -1177,7 +1177,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) { unsigned long flagv; spin_lock_irqsave(&fib->event_lock, flagv); - up(&fib->event_wait); + anon_up(&fib->event_wait); spin_unlock_irqrestore(&fib->event_lock, flagv); schedule(); retval = 0; @@ -1460,7 +1460,7 @@ int aac_check_health(struct aac_dev * aac) * Set the event to wake up the * thread that will waiting. */ - up(&fibctx->wait_sem); + anon_up(&fibctx->wait_sem); } else { printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); kfree(fib); @@ -1691,7 +1691,7 @@ int aac_command_thread(void *data) * Set the event to wake up the * thread that is waiting. 
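 * (the waiter is the return_fib path in commctrl.c, blocked in
 * anon_down_interruptible() on fibctx->wait_sem)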
*/ - up(&fibctx->wait_sem); + anon_up(&fibctx->wait_sem); } else { printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); } diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index abc9ef5..0e29b5f 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -127,7 +127,7 @@ unsigned int aac_response_normal(struct aac_queue * q) spin_lock_irqsave(&fib->event_lock, flagv); if (!fib->done) fib->done = 1; - up(&fib->event_wait); + anon_up(&fib->event_wait); spin_unlock_irqrestore(&fib->event_lock, flagv); FIB_COUNTER_INCREMENT(aac_config.NormalRecved); if (fib->done == 2) { @@ -322,7 +322,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) spin_lock_irqsave(&fib->event_lock, flagv); if (!fib->done) fib->done = 1; - up(&fib->event_wait); + anon_up(&fib->event_wait); spin_unlock_irqrestore(&fib->event_lock, flagv); FIB_COUNTER_INCREMENT(aac_config.NormalRecved); } diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index fb867a9..d406333 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -1595,7 +1595,12 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id) l = l->next; - if (l == i->head && pass_counter++ > PASS_LIMIT) { + /* + * On preempt-rt we can be preempted and run in our + * own thread. + */ + if (!preempt_rt() && l == i->head && + pass_counter++ > PASS_LIMIT) { /* If we hit this, we're dead. */ printk(KERN_ERR "serial8250: too much work for " "irq%d\n", irq); @@ -2729,14 +2734,10 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count) touch_nmi_watchdog(); - local_irq_save(flags); - if (up->port.sysrq) { - /* serial8250_handle_port() already took the lock */ - locked = 0; - } else if (oops_in_progress) { - locked = spin_trylock(&up->port.lock); - } else - spin_lock(&up->port.lock); + if (up->port.sysrq || oops_in_progress || preempt_rt()) + locked = spin_trylock_irqsave(&up->port.lock, flags); + else + spin_lock_irqsave(&up->port.lock, flags); /* * First save the IER then disable the interrupts @@ -2768,8 +2769,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count) check_modem_status(up); if (locked) - spin_unlock(&up->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&up->port.lock, flags); } static int __init serial8250_console_setup(struct console *co, char *options) diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c index cc4c046..aac9a65 100644 --- a/drivers/staging/comedi/drivers/dt9812.c +++ b/drivers/staging/comedi/drivers/dt9812.c @@ -262,7 +262,7 @@ struct dt9812_usb_cmd { #define DT9812_NUM_SLOTS 16 -static DECLARE_MUTEX(dt9812_mutex); +static DEFINE_SEMAPHORE(dt9812_mutex); static struct usb_device_id dt9812_table[] = { {USB_DEVICE(0x0867, 0x9812)}, @@ -1121,7 +1121,7 @@ static int __init usb_dt9812_init(void) /* Initialize all driver slots */ for (i = 0; i < DT9812_NUM_SLOTS; i++) { - init_MUTEX(&dt9812[i].mutex); + semaphore_init(&dt9812[i].mutex); dt9812[i].serial = 0; dt9812[i].usb = NULL; dt9812[i].comedi = NULL; diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c index 171a6f2..7c84121 100644 --- a/drivers/staging/comedi/drivers/usbdux.c +++ b/drivers/staging/comedi/drivers/usbdux.c @@ -307,7 +307,7 @@ struct usbduxsub { */ static struct usbduxsub usbduxsub[NUMUSBDUX]; -static DECLARE_MUTEX(start_stop_sem); +static DEFINE_SEMAPHORE(start_stop_sem); /* * Stops the data acquision @@ -2349,7 +2349,7 @@ static int 
usbduxsub_probe(struct usb_interface *uinterf, dev_dbg(dev, "comedi_: usbdux: " "usbduxsub[%d] is ready to connect to comedi.\n", index); - init_MUTEX(&(usbduxsub[index].sem)); + semaphore_init(&(usbduxsub[index].sem)); /* save a pointer to the usb device */ usbduxsub[index].usbdev = udev; diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c index 939b53f..3a465ba 100644 --- a/drivers/staging/comedi/drivers/usbduxfast.c +++ b/drivers/staging/comedi/drivers/usbduxfast.c @@ -201,7 +201,7 @@ struct usbduxfastsub_s { */ static struct usbduxfastsub_s usbduxfastsub[NUMUSBDUXFAST]; -static DECLARE_MUTEX(start_stop_sem); +static DEFINE_SEMAPHORE(start_stop_sem); /* * bulk transfers to usbduxfast @@ -1500,7 +1500,7 @@ static int usbduxfastsub_probe(struct usb_interface *uinterf, "connect to comedi.\n", index); #endif - init_MUTEX(&(usbduxfastsub[index].sem)); + semaphore_init(&(usbduxfastsub[index].sem)); /* save a pointer to the usb device */ usbduxfastsub[index].usbdev = udev; diff --git a/drivers/staging/cpc-usb/cpc-usb_drv.c b/drivers/staging/cpc-usb/cpc-usb_drv.c index 9bf3f98..9a51965 100644 --- a/drivers/staging/cpc-usb/cpc-usb_drv.c +++ b/drivers/staging/cpc-usb/cpc-usb_drv.c @@ -83,7 +83,7 @@ static CPC_USB_T *CPCUSB_Table[CPC_USB_CARD_CNT] = { 0 }; static unsigned int CPCUsbCnt; /* prevent races between open() and disconnect() */ -static DECLARE_MUTEX(disconnect_sem); +static DEFINE_SEMAPHORE(disconnect_sem); /* local function prototypes */ static ssize_t cpcusb_read(struct file *file, char *buffer, size_t count, @@ -903,7 +903,7 @@ static int cpcusb_probe(struct usb_interface *interface, memset(chan, 0, sizeof(CPC_CHAN_T)); ResetBuffer(chan); - init_MUTEX(&card->sem); + semaphore_init(&card->sem); spin_lock_init(&card->slock); card->udev = udev; diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c index 15aed87..d4d801e 100644 --- a/drivers/staging/frontier/alphatrack.c +++ b/drivers/staging/frontier/alphatrack.c @@ -678,7 +678,7 @@ static int usb_alphatrack_probe(struct usb_interface *intf, dev_err(&intf->dev, "Out of memory\n"); goto exit; } - init_MUTEX(&dev->sem); + semaphore_init(&dev->sem); dev->intf = intf; init_waitqueue_head(&dev->read_wait); init_waitqueue_head(&dev->write_wait); diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c index ef8fcc8..81db34b 100644 --- a/drivers/staging/frontier/tranzport.c +++ b/drivers/staging/frontier/tranzport.c @@ -800,7 +800,7 @@ static int usb_tranzport_probe(struct usb_interface *intf, dev_err(&intf->dev, "Out of memory\n"); goto exit; } - init_MUTEX(&dev->sem); + semaphore_init(&dev->sem); dev->intf = intf; init_waitqueue_head(&dev->read_wait); init_waitqueue_head(&dev->write_wait); diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/go7007/go7007-driver.c index 77b1e76..6efcd79 100644 --- a/drivers/staging/go7007/go7007-driver.c +++ b/drivers/staging/go7007/go7007-driver.c @@ -604,7 +604,7 @@ struct go7007 *go7007_alloc(struct go7007_board_info *board, struct device *dev) go->tuner_type = -1; go->channel_number = 0; go->name[0] = 0; - init_MUTEX(&go->hw_lock); + semaphore_init(&go->hw_lock); init_waitqueue_head(&go->frame_waitq); spin_lock_init(&go->spinlock); go->video_dev = NULL; diff --git a/drivers/staging/go7007/go7007-i2c.c b/drivers/staging/go7007/go7007-i2c.c index c82867f..f9d9d71 100644 --- a/drivers/staging/go7007/go7007-i2c.c +++ b/drivers/staging/go7007/go7007-i2c.c @@ -48,7 +48,7 @@ /* 
There is only one I2C port on the TW2804 that feeds all four GO7007 VIPs * on the Adlink PCI-MPG24, so access is shared between all of them. */ -static DECLARE_MUTEX(adlink_mpg24_i2c_lock); +static DEFINE_SEMAPHORE(adlink_mpg24_i2c_lock); static int go7007_i2c_xfer(struct go7007 *go, u16 addr, int read, u16 command, int flags, u8 *data) diff --git a/drivers/staging/go7007/go7007-usb.c b/drivers/staging/go7007/go7007-usb.c index aa4a9e0..d988d05 100644 --- a/drivers/staging/go7007/go7007-usb.c +++ b/drivers/staging/go7007/go7007-usb.c @@ -1065,7 +1065,7 @@ static int go7007_usb_probe(struct usb_interface *intf, if (board->flags & GO7007_USB_EZUSB_I2C) { memcpy(&go->i2c_adapter, &go7007_usb_adap_templ, sizeof(go7007_usb_adap_templ)); - init_MUTEX(&usb->i2c_lock); + semaphore_init(&usb->i2c_lock); go->i2c_adapter.dev.parent = go->dev; i2c_set_adapdata(&go->i2c_adapter, go); if (i2c_add_adapter(&go->i2c_adapter) < 0) { diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c index 06cacd3..daf6b73 100644 --- a/drivers/staging/go7007/go7007-v4l2.c +++ b/drivers/staging/go7007/go7007-v4l2.c @@ -101,7 +101,7 @@ static int go7007_open(struct file *file) return -ENOMEM; ++go->ref_count; gofh->go = go; - init_MUTEX(&gofh->lock); + semaphore_init(&gofh->lock); gofh->buf_count = 0; file->private_data = gofh; return 0; diff --git a/drivers/staging/go7007/s2250-loader.c b/drivers/staging/go7007/s2250-loader.c index bb22347..5031bbc 100644 --- a/drivers/staging/go7007/s2250-loader.c +++ b/drivers/staging/go7007/s2250-loader.c @@ -35,7 +35,7 @@ typedef struct device_extension_s { #define MAX_DEVICES 256 static pdevice_extension_t s2250_dev_table[MAX_DEVICES]; -static DECLARE_MUTEX(s2250_dev_table_mutex); +static DEFINE_SEMAPHORE(s2250_dev_table_mutex); #define to_s2250loader_dev_common(d) container_of(d, device_extension_t, kref) static void s2250loader_delete(struct kref *kref) diff --git a/drivers/staging/mimio/mimio.c b/drivers/staging/mimio/mimio.c index 1ba8103..63bf2db 100644 --- a/drivers/staging/mimio/mimio.c +++ b/drivers/staging/mimio/mimio.c @@ -160,7 +160,7 @@ static struct usb_driver mimio_driver = { .id_table = mimio_table, }; -static DECLARE_MUTEX(disconnect_sem); +static DEFINE_SEMAPHORE(disconnect_sem); static void mimio_close(struct input_dev *idev) { diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index 93cab0a..6c7dd49 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -39,7 +39,7 @@ #include "cvmx-smix-defs.h" -DECLARE_MUTEX(mdio_sem); +DEFINE_SEMAPHORE(mdio_sem); /** * Perform an MII read. Called by the generic MII routines diff --git a/drivers/staging/otus/wwrap.c b/drivers/staging/otus/wwrap.c index 4db8f6e..1de941e 100644 --- a/drivers/staging/otus/wwrap.c +++ b/drivers/staging/otus/wwrap.c @@ -1066,7 +1066,7 @@ u8_t zfLnxCreateThread(zdev_t *dev) /* Create Mutex and keventd */ INIT_WORK(&macp->kevent, kevent); - init_MUTEX(&macp->ioctl_sem); + semaphore_init(&macp->ioctl_sem); return 0; } diff --git a/drivers/staging/p9auth/p9auth.c b/drivers/staging/p9auth/p9auth.c index 9111dcb..b23e201 100644 --- a/drivers/staging/p9auth/p9auth.c +++ b/drivers/staging/p9auth/p9auth.c @@ -388,7 +388,7 @@ static int cap_init_module(void) /* Initialize each device. 
*/ for (i = 0; i < cap_nr_devs; i++) { cap_devices[i].node_size = cap_node_size; - init_MUTEX(&cap_devices[i].sem); + semaphore_init(&cap_devices[i].sem); cap_setup_cdev(&cap_devices[i], i); } diff --git a/drivers/staging/rspiusb/rspiusb.c b/drivers/staging/rspiusb/rspiusb.c index 2f8155c..9b75c14 100644 --- a/drivers/staging/rspiusb/rspiusb.c +++ b/drivers/staging/rspiusb/rspiusb.c @@ -63,7 +63,7 @@ static int debug; #endif /* prevent races between open() and disconnect() */ -static DECLARE_MUTEX(disconnect_sem); +static DEFINE_SEMAPHORE(disconnect_sem); /* Structure to hold all of our device specific stuff */ struct device_extension { diff --git a/drivers/staging/rt2870/common/2870_rtmp_init.c b/drivers/staging/rt2870/common/2870_rtmp_init.c index 0f4c8af..3492d10 100644 --- a/drivers/staging/rt2870/common/2870_rtmp_init.c +++ b/drivers/staging/rt2870/common/2870_rtmp_init.c @@ -751,13 +751,13 @@ NDIS_STATUS CreateThreads( //init_MUTEX(&(pAd->usbdev_semaphore)); - init_MUTEX_LOCKED(&(pAd->mlme_semaphore)); + semaphore_init_locked(&(pAd->mlme_semaphore)); init_completion (&pAd->mlmeComplete); - init_MUTEX_LOCKED(&(pAd->RTUSBCmd_semaphore)); + semaphore_init_locked(&(pAd->RTUSBCmd_semaphore)); init_completion (&pAd->CmdQComplete); - init_MUTEX_LOCKED(&(pAd->RTUSBTimer_semaphore)); + semaphore_init_locked(&(pAd->RTUSBTimer_semaphore)); init_completion (&pAd->TimerQComplete); // Creat MLME Thread diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 38b8bce..7b61d9b 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -330,8 +330,9 @@ static void async_completed(struct urb *urb) uid_t euid = 0; u32 secid = 0; int signr; + unsigned long flags; - spin_lock(&ps->lock); + spin_lock_irqsave(&ps->lock, flags); list_move_tail(&as->asynclist, &ps->async_completed); as->status = urb->status; signr = as->signr; @@ -347,7 +348,7 @@ static void async_completed(struct urb *urb) } snoop(&urb->dev->dev, "urb complete\n"); snoop_urb(urb, as->userurb); - spin_unlock(&ps->lock); + spin_unlock_irqrestore(&ps->lock, flags); if (signr) kill_pid_info_as_uid(sinfo.si_signo, &sinfo, pid, uid, diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 69e5773..e0dacaf 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -391,10 +391,10 @@ void usb_driver_release_interface(struct usb_driver *driver, if (device_is_registered(dev)) { device_release_driver(dev); } else { - down(&dev->sem); + mutex_lock(&dev->mutex); usb_unbind_interface(dev); dev->driver = NULL; - up(&dev->sem); + mutex_unlock(&dev->mutex); } } EXPORT_SYMBOL_GPL(usb_driver_release_interface); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 95ccfa0..167548a 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1880,7 +1880,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) * when the first handler doesn't use it. So let's just * assume it's never used. 
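 * On PREEMPT_RT the _nort variants used below are expected to leave
 * interrupts enabled (the handler may run in thread context there); on
 * !RT they behave like the plain local_irq_save()/local_irq_restore().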
*/ - local_irq_save(flags); + local_irq_save_nort(flags); if (unlikely(hcd->state == HC_STATE_HALT || !test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) { @@ -1895,7 +1895,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) rc = IRQ_HANDLED; } - local_irq_restore(flags); + local_irq_restore_nort(flags); return rc; } diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 9720e69..b529a76 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -269,8 +269,9 @@ static void sg_complete(struct urb *urb) { struct usb_sg_request *io = urb->context; int status = urb->status; + unsigned long flags; - spin_lock(&io->lock); + spin_lock_irqsave (&io->lock, flags); /* In 2.5 we require hcds' endpoint queues not to progress after fault * reports, until the completion callback (this!) returns. That lets @@ -304,7 +305,7 @@ static void sg_complete(struct urb *urb) * unlink pending urbs so they won't rx/tx bad data. * careful: unlink can sometimes be synchronous... */ - spin_unlock(&io->lock); + spin_unlock_irqrestore (&io->lock, flags); for (i = 0, found = 0; i < io->entries; i++) { if (!io->urbs [i] || !io->urbs [i]->dev) continue; @@ -319,7 +320,7 @@ static void sg_complete(struct urb *urb) } else if (urb == io->urbs [i]) found = 1; } - spin_lock(&io->lock); + spin_lock_irqsave (&io->lock, flags); } urb->dev = NULL; @@ -329,7 +330,7 @@ static void sg_complete(struct urb *urb) if (!io->count) complete(&io->complete); - spin_unlock(&io->lock); + spin_unlock_irqrestore (&io->lock, flags); } @@ -643,7 +644,7 @@ void usb_sg_cancel(struct usb_sg_request *io) int i; io->status = -ECONNRESET; - spin_unlock(&io->lock); + spin_unlock_irqrestore(&io->lock, flags); for (i = 0; i < io->entries; i++) { int retval; @@ -654,7 +655,7 @@ void usb_sg_cancel(struct usb_sg_request *io) dev_warn(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } - spin_lock(&io->lock); + spin_lock_irqsave(&io->lock, flags); } spin_unlock_irqrestore(&io->lock, flags); } diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index 7d33f50..2e2ae7f 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -193,7 +193,7 @@ enum ep_state { }; struct ep_data { - struct semaphore lock; + struct mutex lock; enum ep_state state; atomic_t count; struct dev_data *dev; @@ -297,10 +297,10 @@ get_ready_ep (unsigned f_flags, struct ep_data *epdata) int val; if (f_flags & O_NONBLOCK) { - if (down_trylock (&epdata->lock) != 0) + if (mutex_trylock(&epdata->lock) != 0) goto nonblock; if (epdata->state != STATE_EP_ENABLED) { - up (&epdata->lock); + mutex_unlock(&epdata->lock); nonblock: val = -EAGAIN; } else @@ -308,7 +308,8 @@ nonblock: return val; } - if ((val = down_interruptible (&epdata->lock)) < 0) + val = mutex_lock_interruptible(&epdata->lock); + if (val < 0) return val; switch (epdata->state) { @@ -322,7 +323,7 @@ nonblock: // FALLTHROUGH case STATE_EP_UNBOUND: /* clean disconnect */ val = -ENODEV; - up (&epdata->lock); + mutex_unlock(&epdata->lock); } return val; } @@ -392,7 +393,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) if (likely (data->ep != NULL)) usb_ep_set_halt (data->ep); spin_unlock_irq (&data->dev->lock); - up (&data->lock); + mutex_unlock(&data->lock); return -EBADMSG; } @@ -410,7 +411,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) value = -EFAULT; free1: - up (&data->lock); + mutex_unlock(&data->lock); kfree (kbuf); return value; } @@ -435,7 +436,7 @@ ep_write (struct file *fd, const char __user *buf, 
size_t len, loff_t *ptr) if (likely (data->ep != NULL)) usb_ep_set_halt (data->ep); spin_unlock_irq (&data->dev->lock); - up (&data->lock); + mutex_unlock(&data->lock); return -EBADMSG; } @@ -454,7 +455,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) VDEBUG (data->dev, "%s write %zu IN, status %d\n", data->name, len, (int) value); free1: - up (&data->lock); + mutex_unlock(&data->lock); kfree (kbuf); return value; } @@ -465,7 +466,8 @@ ep_release (struct inode *inode, struct file *fd) struct ep_data *data = fd->private_data; int value; - if ((value = down_interruptible(&data->lock)) < 0) + value = mutex_lock_interruptible(&data->lock); + if (value < 0) return value; /* clean up if this can be reopened */ @@ -475,7 +477,7 @@ ep_release (struct inode *inode, struct file *fd) data->hs_desc.bDescriptorType = 0; usb_ep_disable(data->ep); } - up (&data->lock); + mutex_unlock(&data->lock); put_ep (data); return 0; } @@ -506,7 +508,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value) } else status = -ENODEV; spin_unlock_irq (&data->dev->lock); - up (&data->lock); + mutex_unlock(&data->lock); return status; } @@ -672,7 +674,7 @@ fail: value = -ENODEV; spin_unlock_irq(&epdata->dev->lock); - up(&epdata->lock); + mutex_unlock(&epdata->lock); if (unlikely(value)) { kfree(priv); @@ -764,7 +766,8 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) u32 tag; int value, length = len; - if ((value = down_interruptible (&data->lock)) < 0) + value = mutex_lock_interruptible(&data->lock); + if (value < 0) return value; if (data->state != STATE_EP_READY) { @@ -853,7 +856,7 @@ fail: data->desc.bDescriptorType = 0; data->hs_desc.bDescriptorType = 0; } - up (&data->lock); + mutex_unlock(&data->lock); return value; fail0: value = -EINVAL; @@ -869,7 +872,7 @@ ep_open (struct inode *inode, struct file *fd) struct ep_data *data = inode->i_private; int value = -EBUSY; - if (down_interruptible (&data->lock) != 0) + if (mutex_lock_interruptible(&data->lock) != 0) return -EINTR; spin_lock_irq (&data->dev->lock); if (data->dev->state == STATE_DEV_UNBOUND) @@ -884,7 +887,7 @@ ep_open (struct inode *inode, struct file *fd) DBG (data->dev, "%s state %d\n", data->name, data->state); spin_unlock_irq (&data->dev->lock); - up (&data->lock); + mutex_unlock(&data->lock); return value; } @@ -1630,7 +1633,7 @@ static int activate_ep_files (struct dev_data *dev) if (!data) goto enomem0; data->state = STATE_EP_DISABLED; - init_MUTEX (&data->lock); + mutex_init(&data->lock); init_waitqueue_head (&data->wait); strncpy (data->name, ep->name, sizeof (data->name) - 1); diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index 9d0675e..d20cb67 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c @@ -2766,7 +2766,7 @@ static int ftdi_elan_probe(struct usb_interface *interface, ftdi->sequence_num = ++ftdi_instances; mutex_unlock(&ftdi_module_lock); ftdi_elan_init_kref(ftdi); - init_MUTEX(&ftdi->sw_lock); + semaphore_init(&ftdi->sw_lock); ftdi->udev = usb_get_dev(interface_to_usbdev(interface)); ftdi->interface = interface; mutex_init(&ftdi->u132_lock); diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index bd7581b..99188c9 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include "pl2303.h" @@ -184,6 +185,7 @@ static int serial_open (struct tty_struct *tty, struct file *filp) 
struct usb_serial_port *port; unsigned int portNumber; int retval = 0; + int first = 0; dbg("%s", __func__); @@ -223,7 +225,7 @@ static int serial_open (struct tty_struct *tty, struct file *filp) /* If the console is attached, the device is already open */ if (port->port.count == 1 && !port->console) { - + first = 1; /* lock this module before we call it * this may fail, which means we must bail out, * safe because we are called with BKL held */ @@ -246,13 +248,21 @@ static int serial_open (struct tty_struct *tty, struct file *filp) if (retval) goto bailout_interface_put; mutex_unlock(&serial->disc_mutex); + set_bit(ASYNCB_INITIALIZED, &port->port.flags); } mutex_unlock(&port->mutex); /* Now do the correct tty layer semantics */ retval = tty_port_block_til_ready(&port->port, tty, filp); - if (retval == 0) + if (retval == 0) { + if (!first) + usb_serial_put(serial); return 0; - + } + mutex_lock(&port->mutex); + if (first == 0) + goto bailout_mutex_unlock; + /* Undo the initial port actions */ + mutex_lock(&serial->disc_mutex); bailout_interface_put: usb_autopm_put_interface(serial->interface); bailout_module_put: @@ -340,6 +350,22 @@ static void serial_close(struct tty_struct *tty, struct file *filp) dbg("%s - port %d", __func__, port->number); + /* FIXME: + This leaves a very narrow race. Really we should do the + serial_do_free() on tty->shutdown(), but tty->shutdown can + be called from IRQ context and serial_do_free can sleep. + + The right fix is probably to make the tty free (which is rare) + and thus tty->shutdown() occur via a work queue and simplify all + the drivers that use it. + */ + if (tty_hung_up_p(filp)) { + /* serial_hangup already called serial_down at this point. + Another user may have already reopened the port but + serial_do_free is refcounted */ + serial_do_free(port); + return; + } if (tty_port_close_start(&port->port, tty, filp) == 0) return; @@ -355,7 +381,8 @@ static void serial_hangup(struct tty_struct *tty) struct usb_serial_port *port = tty->driver_data; serial_do_down(port); tty_port_hangup(&port->port); - serial_do_free(port); + /* We must not free port yet - the USB serial layer depends on it's + continued existence */ } static int serial_write(struct tty_struct *tty, const unsigned char *buf, @@ -394,7 +421,6 @@ static int serial_chars_in_buffer(struct tty_struct *tty) struct usb_serial_port *port = tty->driver_data; dbg("%s = port %d", __func__, port->number); - WARN_ON(!port->port.count); /* if the device was unplugged then any remaining characters fell out of the connector ;) */ if (port->serial->disconnected) diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c index 5ad3616..125d19e 100644 --- a/drivers/uwb/umc-bus.c +++ b/drivers/uwb/umc-bus.c @@ -62,12 +62,12 @@ int umc_controller_reset(struct umc_dev *umc) struct device *parent = umc->dev.parent; int ret = 0; - if(down_trylock(&parent->sem)) + if (mutex_trylock(&parent->mutex)) return -EAGAIN; ret = device_for_each_child(parent, parent, umc_bus_pre_reset_helper); if (ret >= 0) device_for_each_child(parent, parent, umc_bus_post_reset_helper); - up(&parent->sem); + mutex_unlock(&parent->mutex); return ret; } diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h index d5bcfc1..17b10b9 100644 --- a/drivers/uwb/uwb-internal.h +++ b/drivers/uwb/uwb-internal.h @@ -366,12 +366,12 @@ struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal); static inline void uwb_dev_lock(struct uwb_dev *uwb_dev) { - down(&uwb_dev->dev.sem); + mutex_lock(&uwb_dev->dev.mutex); } static inline void 
uwb_dev_unlock(struct uwb_dev *uwb_dev) { - up(&uwb_dev->dev.sem); + mutex_unlock(&uwb_dev->dev.mutex); } #endif /* #ifndef __UWB_INTERNAL_H__ */ diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 471a9a6..7b09263 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -1203,7 +1203,6 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height, { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops = info->fbcon_par; - struct display *p = &fb_display[vc->vc_num]; u_int y_break; @@ -1235,10 +1234,11 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, struct display *p = &fb_display[vc->vc_num]; struct fbcon_ops *ops = info->fbcon_par; - if (!fbcon_is_inactive(vc, info)) + if (!fbcon_is_inactive(vc, info)) { ops->putcs(vc, info, s, count, real_y(p, ypos), xpos, get_color(vc, info, scr_readw(s), 1), get_color(vc, info, scr_readw(s), 0)); + } } static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos) @@ -3225,6 +3225,7 @@ static const struct consw fb_con = { .con_screen_pos = fbcon_screen_pos, .con_getxy = fbcon_getxy, .con_resize = fbcon_resize, + .con_preemptible = 1, }; static struct notifier_block fbcon_event_notifier = { diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 59d7d5e..41cc04c 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -51,7 +51,7 @@ #include