#
# base tree: 2.6.29-rc8
#
#origin.patch.bz2
#
# Mainline + -tip fixes and updates
#
origin.patch.bz2
kbuild.patch
#
# Needs to be solved upstream by BKL removal
#
revert-sched-changes.patch
revert-bkl-cleanup.patch
revert-preempt-bkl-revert.patch
#
# Patches which should go mainline
#
rt_mutex_setprio.patch
posix-timers-prevent-broadcast-signals.patch
#
# Lockless QRCU
#
qrcu.patch
# KVM - RT fixes
# NECESSARY: sched-enable-irqs-in-preempt-in-notifier-call.patch
#
# Check what's in mainline / mm or might be
# upstream material.
#
spinlock-trylock-cleanup-sungem.patch
x86_64-tsc-sync-irqflags-fix.patch
neptune-no-at-keyboard.patch
rtmutex-debug.h-cleanup.patch
netpoll-8139too-fix.patch
kprobes-preempt-fix.patch
replace-bugon-by-warn-on.patch
# Suspend / resume fixups
i386-mark-atomic-irq-ops-raw.patch
msi-suspend-resume-workaround.patch
floppy-resume-fix.patch
#
# assorted fixlets from -mm:
#
ioapic-fix-too-fast-clocks.patch
move-native-irq.patch
dont-unmask-io_apic.patch
#
# misc build beautification patches:
#
gcc-warnings-shut-up.patch
#
# Various fixlets
#
nfs-stats-miss-preemption.patch
random-driver-latency-fix.patch
#
# Debugging patches
#
#apic-dumpstack.patch
#
# Revert loopback bh assumption patch
#
loopback-revert.patch
#
# -rt queue:
#
#inet_hash_bits.patch
#inet-hash-bits-ipv6-fix.patch
#
# IRQ threading
#
generic-cmpxchg-use-raw-local-irq-variant.patch
preempt-softirqs-core.patch
preempt-irqs-core.patch
preempt-irqs-core-fix.patch
preempt-irqs-direct-debug-keyboard.patch
preempt-realtime-direct-keyboard-sparseirq-fix.patch
preempt-irqs-timer.patch
preempt-irqs-hrtimer.patch
# i386
preempt-irqs-i386.patch
#preempt-irqs-i386-ioapic-mask-quirk.patch
# x86_64
#preempt-irqs-x86-64-ioapic-mask-quirk.patch
preempt-irqs-Kconfig.patch
#
# Real real time stuff :)
#
preempt-irqs-port-fixes.patch
rt-apis.patch
rt-slab-new.patch
rt-page_alloc.patch
#
# rt-mutexes
#
rt-mutex-preempt-debugging.patch
## PORT-BARRIER: rt-mutex-irq-flags-checking.patch
rt-mutex-trivial-tcp-preempt-fix.patch
rt-mutex-trivial-route-cast-fix.patch
rt-mutex-i386.patch
rt-mutex-x86-64.patch
rt-mutex-core.patch
rt-mutex-core-fixes.patch
rt-mutex-core-mutex-fixes.patch
rt-mutex-core-fixes2.patch
rt-mutex-fix-non-lock-kernel-config.patch
slub-compile-fix.patch
rt-mutex-compat-semaphores.patch
#
# Per-CPU locking assumption cleanups:
#
per-cpu-locked-infrastructure.patch
percpu-locked-mm.patch
drain-all-local-pages-via-sched.patch
rt-page_alloc.c-cleanup.patch
percpu-locked-netfilter.patch
#percpu-locked-netfilter2.patch
#
# Various preempt fixups
#
net-core-preempt-fix.patch
bh-uptodate-lock.patch
bh-state-lock.patch
jbd_assertions_smp_only.patch
#
# Tasklet redesign
#
tasklet-redesign.patch
tasklet-busy-loop-hack.patch
#
# Disable irq poll on -rt
#
disable-irqpoll.patch
#
# Inaccurate -rt stats (should be replaced by CFS)
#
kstat-add-rt-stats.patch
# Misc
preempt-realtime-warn-and-bug-on.patch
#
# Posix-cpu-timers in a thread
#
cputimer-thread-rt_A0.patch
cputimer-thread-rt-fix.patch
shorten-posix-cpu-timers-name.patch
#
# Various broken drivers
#
vortex-fix.patch
serial-locking-rt-cleanup.patch
#
# Serial optimizing
#
serial-slow-machines.patch
#
# Realtime patches
#
# X86_64: needs splitting
preempt-realtime-x86_64.patch
# i386
preempt-realtime-i386.patch
remove-check-pgt-cache-calls.patch
#preempt-irqs-i386-idle-poll-loop-fix.patch
#
# Core patch
#
# Note: this is a convenience split-up, it is not supposed to compile
# step by step.
# Needs some care, but it is way easier to handle than
# the previous touch-all-in-one patch.
#
preempt-realtime-sched.patch
preempt-realtime-sched-remove-debug.patch
preempt-realtime-mmdrop-delayed.patch
preempt-realtime-sched-i386.patch
preempt-realtime-prevent-idle-boosting.patch
#
preempt-realtime-cfs-accounting-fix.patch
# PORT VICTIM: schedule-tail-balance-disable-irqs.patch
preempt-realtime-sched-cpupri.patch
preempt-realtime-core.patch
fix-net-bug-fixes.patch
resurrect-softirq-code.patch
preempt-realtime-net.patch
preempt-realtime-net-softirq-fixups.patch
dev-queue-xmit-preempt-fix.patch
net-xmit-lock-owner-cleanup.patch
preempt-realtime-cleanup-dev-queue-xmit.patch
preempt-realtime-loopback.patch
preempt-realtime-fs-block.patch
preempt-realtime-acpi.patch
preempt-realtime-ipc.patch
#preempt-realtime-sound.patch
#
ftrace-compile-fixes.patch
preempt-realtime-mm.patch
preempt-realtime-init-show-enabled-debugs.patch
preempt-realtime-compile-fixes.patch
preempt-realtime-console.patch
#preempt-realtime-debug-sysctl.patch
preempt-realtime-ide.patch
preempt-realtime-input.patch
preempt-realtime-irqs.patch
irq-desc-init.patch
preempt-realtime-fix-irqdesc-lock-initializers.patch
preempt-realtime-fix-sig-cputimer-lock.patch
preempt-realtime-net-drivers.patch
#preempt-realtime-netconsole.patch
preempt-realtime-printk.patch
# CHECKME: preempt-realtime-profiling.patch
preempt-realtime-rawlocks.patch
preempt-realtime-rcu.patch
preempt-realtime-timer.patch
fix-compilation-for-non-RT-in-timer.patch
kstat-fix-spurious-system-load-spikes-in-proc-loadavgrt.patch
preempt-realtimer-timer-non-rt-warning-fixes.patch
preempt-realtimer-timer-more-non-rt-warning-fixes.patch
gtod-optimize.patch
rt-move-update-wall-time-back-to-do-timer.patch
bz235099-idle-load-fix.patch
preempt-realtime-usb.patch
preempt-realtime-warn-and-bug-on-fix.patch
preempt-realtime-debugobjects-rt-safe.patch
preempt-realtime-xfs-compat-semaphore.patch
rt-stop-cpus-fix.patch
preempt-realtime-tracer.patch
preempt-realtime-btrfs-locking-workaround.patch
preempt-realtime-force-rcu-preempt.patch
preempt-realtime-perfcounters.patch
preempt-realtime-ipi-call-lock-raw.patch
preempt-realtime-timer-cpu-hotplug-fix.patch
preempt-realtime-serial-console-fix.patch
preempt-realtime-disable-workqueue-tracer-on-preempt-rt.patch
# CHECKME: irq-mask-fix.patch
handle-pending-in-simple-irq.patch
preempt-realtime-irqthreading-sparseirq-fixups.patch
preempt-realtime-sparseirq-waitqueue-init-fix.patch
#
# Various -rt fixups
#
#preempt-realtime-supress-cpulock-warning.patch
#preempt-realtime-supress-nohz-softirq-warning.patch
#preempt-realtime-8139too-rt-irq-flags-fix.patch
preempt-realtime-mellanox-driver-fix.patch
#
# Utility patches (not for upstream inclusion):
#
preempt-realtime-supress-rtc-printk.patch
hrtimer-no-printk.patch
#
# soft watchdog queue:
#
#softlockup-fix.patch
softlockup-add-irq-regs-h.patch
#softlockup-better-printout.patch
#softlockup-cleanups.patch
#softlockup-use-cpu-clock.patch
#
# Not yet reviewed
#
#
# START of Pete's ccur-pagecache queue
#
#
# END of Pete's ccur-pagecache queue
#
#
# kmap atomic fixes
#
kmap-atomic-prepare.patch
pagefault-disable-cleanup.patch
kmap-atomic-i386-fix.patch
#
# Highmem modifications
#
highmem-revert-mainline.patch
highmem_rewrite.patch
highmem-redo-mainline.patch
rt-kmap-scale-fix.patch
#
# Not yet reviewed
#
highmem-atomic-fix.patch
#select-error-leak-fix.patch
fix-emergency-reboot.patch
timer-freq-tweaks.patch
#
# Debug patches:
#
#pause-on-oops-head-tail.patch
#
# x86-64 vsyscall modifications
#
x86-64-tscless-vgettimeofday.patch
#vsyscall-fixadder-pa.patch
#
# Timekeeping fixups
#
# x rt-time-starvation-fix.patch
# x rt-time-starvation-fix-update.patch
#
# RT-Java testing stuff
#
#Add-dev-rmem-device-driver-for-real-time-JVM-testing.patch
#Allocate-RTSJ-memory-for-TCK-conformance-test.patch
#
# Softirq modifications
#
#new-softirq-code.patch
softirq-per-cpu-assumptions-fixes.patch
#fix-migrating-softirq.patch
#only-run-softirqs-from-irq-thread-when-irq-affinity-is-set.patch
#fix-softirq-checks-for-non-rt-preempt-hardirq.patch
smp-processor-id-fixups.patch
#
# Weird crap unearthed by -rt which needs to be investigated
#
irda-fix.patch
#nf_conntrack-fix-smp-processor-id.patch
#
# Needs proper fix
#
print-might-sleep-hack.patch
lockdep-rt-mutex.patch
#lockstat-rt-hooks.patch
#lockstat_bounce_rt.patch
#
# KVM:
#
#kvm-rt.patch
#
# Add RT to uname and apply the version
#
RT_utsname.patch
#
# not yet backmerged tail patches:
#
preempt-rt-no-slub.patch
paravirt-function-pointer-fix.patch
quicklist-release-before-free-page.patch
quicklist-release-before-free-page-fix.patch
sched-rt-stats.patch
# CHECKME: mitigate-resched-flood.patch
genirq-soft-resend.patch
relay-fix.patch
#schedule_on_each_cpu-enhance.patch
#schedule_on_each_cpu-enhance-rt.patch
lockdep-rt-recursion-limit-fix.patch
cond_resched_softirq-WARN-fix.patch
# stuff Ingo put into version.patch
#export-schedule-on-each-cpu.patch
#
fix-alternate_node_alloc.patch
hack-convert-i_alloc_sem-for-direct_io-craziness.patch
dont-let-rt-rw_semaphores-do-non_owner-locks.patch
# CHECKME: rt-s_files-kill-a-union.patch
loadavg_fixes_weird_loads.patch
# HPET patches
#watchdog_use_timer_and_hpet_on_x86_64.patch
# x stop-critical-timing-in-idle.patch
#
rt-wakeup-fix.patch
disable-ist-x86_64.patch
plist-debug.patch
seq-irqsave.patch
numa-slab-freeing.patch
# Peter's patches
#
# workqueue PI
#
# CHECKME: rt-list-mods.patch
# CHECKME: rt-plist-mods.patch
# CHECKME: rt-workqeue-prio.patch
# CHECKME: rt-workqueue-barrier.patch
# CHECKME: rt-wq-barrier-fix.patch
# CHECKME: rt-delayed-prio.patch
# CHECKME: sched_prio.patch
# x critical-timing-kconfig.patch
lock-init-plist-fix.patch
ntfs-local-irq-save-nort.patch
dont-disable-preemption-without-IST.patch
# CHECKME: filemap-dont-bug-non-atomic.patch
# CHECKME: fix-bug-on-in-filemap.patch
rt-sched-groups.patch
printk-dont-bug-on-sched.patch
user-no-irq-disable.patch
proportions-raw-locks.patch
# AT91 patches
use-edge-triggered-irq-handler-instead-of-simple-irq.patch
apic-level-smp-affinity.patch
printk-in-atomic.patch
printk-in-atomic-hack-fix.patch
slab-irq-nopreempt-fix.patch
swap-spinlock-fix.patch
aacraid-compat-sem.patch
# Luis's gtod updates
fix_vdso_gtod_vsyscall64_2.patch
git-ignore-script-lpp.patch
sched-wake_up_idle_cpu-rt.patch
# Adaptive Locks and lateral steal
rtmutex-lateral-steal.patch
rtmutex-rearrange.patch
rtmutex-remove-xchg.patch
adaptive-spinlock-lite-v2.patch
adaptive-optimize-rt-lock-wakeup.patch
adaptive-task-oncpu.patch
adaptive-adjust-pi-wakeup.patch
adapt-remove-extra-try-to-lock.patch
adaptive-earlybreak-on-steal.patch
fix-adaptive-hack.patch
tglx-04-rtmutex-unify-state-manipulation.patch
tglx-05-rtmutex-remove-uber-optimization.patch
tglx-07-rtmutex-prevent-missed-wakeups.patch
fix-config-debug-rt-mutex-lock-underflow-warnings.patch
realtime-preempt-warn-about-tracing.patch
sub-dont-disable-irqs.patch
raw-spinlocks-for-nmi-print.patch
sched-fix-dequeued-race.patch
lockdep-atomic-fixup.patch
seqlock-01-make-sure-that-raw_seqlock-retries.patch
namespace-lock-fixes.patch
sched-generic-hide-smp-warning.patch
seqlock-serialize-against-writers.patch
seqlocks-handle-rwlock-and-spin.patch
ata-irq-save-nort.patch
fix-erroneous-histogram-stop-warn-on-messages.patch
# hrtimer: bring back the hard/softirq separation :(
hrtimer-fix-wait-for-hrtimer.patch
printk-tick-move-to-softirq-on-rt.patch
hrtimer-fixup-hrtimer-callback-changes.patch
blkcypher-fix.patch
rt-res_counter-fix.patch
preempt-realtime-paravirt-spinlocks-fix.patch
# x86-32 highmem related fixups
preempt-realtime-x86-32-gup-kmapatomic.patch
preempt-rt-x86-pageattr-flush-unused.patch
# generally valid on -rt, not only for x86-32 highmem
preempt-rt-scatterlist-nort.patch
netfilter-rcu-destroy.patch
percpu-locked-netfilter-ecache.patch
#debugobjects-rt-safe.patch
disable-maxcpus-for-now.patch
x86-microcode-fix-sysfs-wreckage.patch
trace-disable-hw-branch-tracer.patch
rt-spin-needbreak-fixup.patch
mm-lock-break-on-bulk.patch
page-alloc-bulk-moar.patch
pagealloc-cond-resched-rt-only.patch
trace-setprio.patch
signal-keep-a-task-private-siginfo-for-rt-tasks.patch
cpuhotplug-idle.patch
cpuhotplug-page_alloc.patch
cpuhotplug-per-cpu.patch
cpuhotplug-vs-slab.patch
slab-free-pages.patch
lockdep-compat-sema-fix.patch
x86-vector-sparse-irq-fix.patch
posixtimer-avoid-wakeups.patch
signal-do-not-wakeup-self.patch
version.patch
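
The series above is applied top to bottom: each non-comment line names one patch applied on top of the 2.6.29-rc8 base tree, and a leading '#' disables an entry. The following is a minimal, illustrative Python sketch of that convention, not part of the queue itself; it assumes standard quilt series semantics (one patch per line, '#' starts a comment) and a hypothetical default path of patches/series, and simply prints the enabled patches in apply order.

    import sys

    def enabled_patches(path):
        """Yield patch names from a quilt-style series file, skipping comments."""
        with open(path) as series:
            for raw in series:
                # Everything from the first '#' on is a comment or a disabled patch.
                entry = raw.split("#", 1)[0].strip()
                if entry:
                    # quilt allows per-patch options after the name (e.g. -p1);
                    # the patch file name is the first token.
                    yield entry.split()[0]

    if __name__ == "__main__":
        series_path = sys.argv[1] if len(sys.argv) > 1 else "patches/series"
        patches = list(enabled_patches(series_path))
        for name in patches:
            print(name)
        print(len(patches), "patches to apply", file=sys.stderr)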
/{GqEXƊi>D(((((((.n()((i0SI  &M4' )(}hK 1JDz; $`į D@F%wg>OGAa-/!wKKOt 34tpTW!PaAɌa!/@Oc?Cꪪ?̂?VS kYhRh2xhXRH$|_z) OF~QNvBwZ{|:?Fv # Dq5q`jTaHbB%("h#"(刢@W{M2HD2G ;rp/G!w~{s!v0XC}.`تGlpxsy:Mld;tt APE%\ 'D[\!V9"n&vF.̓;N=XB1Ԓ'X\CMDD[3G1 x@\6TtWjMwbzöe> ]pKC,I_3 ؑ&%\B㓐:!'sNŗۋ{ dNdWvXAc9RQf7Ye`' :wTl0# gjtQJyC}5r*5yuA!P21B$5DF($FFۛ`v >@L Hmb>((KpOG{(& fXRX 8ˆ֬F\(aY C W`gt!!Sv}_%X(*e}_ @PL*TG:)N*D[0(-A E|^CŏA,#琊!J!J0%i:)9a"EOlQh ґz!"؉b~3&V %@MI!‘ܼcHCP"q?'o#Pwa>/>FkOWHa`C@Y6xp8PY3'6(턁SH>q b*E BҀ^@:XT9') aRW Hc)AI@T S!KYjTJf&X&()!0Cs $8!T(.21BP/F; yRPbEHd$|?o@'`S@gp}z".1DA @4p=@{T!&1H\M!!#D8@aG@~IP$eUhM7jc9qhHwǓ&4::`Ґ~~%~^8/cM˜ˑ}Ys_:XM5((Xs8IΠ=WU)i(Fz1ӯu "1TgTCRM4 @y $efihYk̲`yxp<oy]:dLb>Ny8rч|ç0 QUvsc Jm*(YI&3EICUYլ0X ]Z ؀ka+7JD>~_k3p_OHh 蓾%)zRQH=!p^w9OWsNQbk)*>+侮$C)=Gw_KtyCG&  oPQ94x1c>KWURֈZ,=%@ˌ"lޥg\8]UK#-nKjbAo/_#M㬜DJ(6e@붫s{@%)`6Š.-찷uVr7~ 48ۭгȾ .:W ֶC((aWHF-R/:*;wC3*%|I@L5_b9'{wϱr8T}l|,Q=q%~0 yRm2^ IG(kb݀MH!$:ݮS\+ҵoš^R1cfD J*EݗM!`R4<6T  @:iUPЕ*Ӻr (e+T#aǥc=1PhZ*6 $dn`AӔ :- 94Lf2zR`:) ow7?(ZO//kS!{D`x\C/xa#3@@:Qqb }R0xl{{7,"P!7VИRX-E{7kK*{C؞vH~-7ȥYFd_7$i>$I~-]{bTDb@oXd_-?q ֹd9X4I-)XHh=z@{"lk EF!?4pD?*f?7= icEIUP Cwb}}CkYP>V)=,D^n[#%PiK$ xQ$,b%(#O*@˞ PZQB]yMX2ahl d}qbc853Fӭ:O9h܃>T ?t8=l .d$In6{ 41mTE68g1"DbU>(`;Ak9K/:$BU![I)JGGlR|}|[&2{X?zeCK:<@#y dEzk27`j% :%XMmmi띞4$d=(;CMѷ>Cˍô'bŠ- 2` #M<\Ra$0q#L=A=huBB$ Yum)op-|ae ޸n=Hu8 HlxRN8{uY?Ex&ꪨ 5HHrӵ#Kȶ=nc};:4ljGVcc7[ Ȫsw|T;- EIB.bzssbSȓg' GBKTlkZ%ǧMك<3$j%BN*PJށk C]a-Gs]UX$'(dv'Cz ̄BSlחͭ8j!70tΚtXBP!!t(/ mH:(ēi ăewm{5{*.ZghFNadںeؓūbT(6WS zH edydVh ި(.N0ѫJh& pj)5v+sVp^E\^8ahgI=?AtqX$oۛhҲxJ< 1]ӈ038z>_0 Pms\Y T)ǦWT PWT'AowFQW])#j_rmH;RAYL 6]Nc H-Ey-lD #bU29mjQo'*~=]\lX5B[^M^AkV>oTMLҝI&-j=-T=NA~#%t]uX~|hW?zLpԱbD_ئiAPR+P)(梫%Xf)p'HHl.5 Ǩzae+UQEq4Py29)2d ɂ?I !]I$#^*1"^ V x4LFU hb\WJQ , *N,1=[ip 'q`bQr{Z9Q:yZBqRy1dgjS*xi"΅̪CQ fB @!P@7d^͖KJX/ya$d 46$kz`6u VR`1= !I`3eE{`xjRX A @\u(E ]3M9'mx4ELj%4B-z&$DjZTTnǸ{F;*:hF "h@(`cW $!c!xR$.P<״oBJ5JCO]t%Q|N2fGjS%.!0ЙCsbP]C(T% kD'STU 7mb !K g(Ŋ彆8E n®n8(s{/}m -oF8=\4ژGz!m`GfX+jՎ j3 9zPD1VBBBlSe̴㉞\ AS*?#%r D'Hz`(&ǡPQKntp}ET@{o5zyF =^}xN.q6AjT bOF_U0BpY O7|w@P+:YQ[+C(+6.pG ҡ~SĽʍЇkHhM%*: PHtJ<P:B PBh h*G)P 4CCJb <8JR*=F孀 7,ّLQL4 7QW@2}7JyEC>mM=m( %kjJxT*u<X0p  ֤l]r0h-)Kd5OV6:RD>B5P}!@[FPtnDL$:Zx`ߍ`^NΡO_m[̓ۯk%bjKrX6jr5\#QzM@Xd1ŃG1pvg^t3Vla0Y r*ӂ z;۩뾸uMVˬYb5pE.c-"I0xlm" :#iqG\8IAC鈫MU$J0N%'r7txNH0SD0x] I=aN>mߧńEV:;@ 0҃"Ϭn|H;խ6A X^WA"(+9(>J֒bTU#40$As`NTˢ""NS@ҰE!L8L؈PAtC8}'pjA d@ϮǠC:cV(UB0].Iq{J/ Us&Ҫ2~5op1: |'ˆ ]?"aM5O]%h^WbFfy!&xAXt"Fd-L^; D(ԇ뵒#/{ FZ+"DgqT xtqlu.!ai JNn!8St0h%’DHI )tO !!8^%QkFKa; <$HpN:.d$RnA; $|`&cGy`wUQ)j34IBC,a ȼOggq'A( "\\ Wh Dj#o;J|α|G`dAN`7); CQq-^޿*Ct${#:wAHxfV#^UJJ>#9?8(+^8h8/?`* CPAPJp?IhВ-;BR6H"T_zh$Z*'l}^ҔKdM ֟`Wb^M"C},p%š  A/ [J_8daH7lBlR-!D! h։ECC} 0 w|+8`s$X1k6Źkc=AO$i*~TK.l}gpo|`( .~eLU刚caq>|y8{6h.b! Ȓ(D(i($ViW45*}聩 =D Ql@(Er?XɴCDTBOg`OҨB\qu _T?* |ǫٙ%w(%oёPk?Wf\_CHimtJ?G ʩ<2Б}3ـ\;lq:/~l\'y `ǣLO٧,d%>NOYrfҍ0(L Ń Ă"gӹ1J:A^;D@HB1C~:IZY)W*#FLtnpELrL0`xd0 !t: KAmԤ$~k,F&Be(ɰv~NPy/^k. 6:8]::N#Ug4ei8LQE8.+P#fƆA#j_ >1@ݸ>PO@Lz17 68Gtݰ( ?zBk93PD g A>1 Yx@}:ऊ:=>֝wD6""r3ّݑxth'$3B~pP@戝"nU&쵧BLUXLb=ߟ,4o>CЕ)ܭwS s>#ׄ&Q+BX΄ &DKiܑ>ve&TDOPı>X؋bA\'.|DzyڍmM@[EJ#)%@٩/hR!CgKC,̜X䯭g75լ.sS Jp|WҤȼj>>SzlpRтj${RG5Di_ GWl7w^at[ 0?'_EdɈU!I )*Ց7[}턏/K>=g1"fd!ϴ,!<T];2׋!5z|@дPV,V2B!@v Z>__@/IJhv|I^_>/[TG@bڄzld՝, _#Kbv%~Pql(7W+CTPˑR^" δHT`f _^r,Ȼ,G3v/6 z|n>eQv/RUHH]#'CUރ\gx(?*=6R^zu𦠅)ӿ+5YP4VH+\A bLL,!>J]=ڂ IPX *e.y[oۃ[@ˇ_ xeBV8@Xd.  
}PHTU'w)hD9 a$U1WA6 ȝ98r(<#I^āΡXGMZBjB DD9{ 6wDTsCIcnK,/W G9E3yw:r=uD5@ Ө=1Qauv!d ?_P ~H)$D_;i #Ж ;2I,J,IAԄ  C$}҃ HD=Ǩu?8)> 0 ^~@HTI`}}䑩$0TsE#}斝UI>#էݳ8;oiPF9F+C +lQ@$a ` PXDtLMQr?MMEM3'` P^8ALkVvJ)fo7V*岤AEJ_@?w]"AU}d}nƝ44K5K‡RL QT_om?e9\@TBebTO )@ аY"1AY&SYGKLqccݧwg|K}}mtvO4n%*9}P| ݁RT$6eI%o(iBu_kxz}}gXҀXxy)O$Pz}җсۃ >۾@kq.3x7Z{שiݹv1zas۞ۜ*R7 :',ޘ1嬘=],J(k}@2>  oz{x<% ٽ|6Zi<Cu[z3n=WO}m+;}(  -`4((hn<uI鬀[Q$;]ȗoNP( 2ƢlRt]v"b*OY zWV`: U(G݇@ѥD ]z:м>;nZ5$hC벨++xӵ꧇uk|w;>Нh΍s0G:H4 60J( |@i{mޏH=}sɪС!ʚӆ5O P önĨP(N]ݰ||}7w;پJh^;=c.ֺZ`{G}`z aֲaiBz}^Ȫ+^wɛzB4$J# P @3^sWD/(3m{oi Y 3׮{>mnRk@^>^XTUT/ox!mh>=%@>FƢhJԌڅ)}|7Qt5tg>âJ@nA/FTPPPGOl3ahՏk Ϧ۶h@E!Hܢct@[eH)Ҁ(a֞'+MZic-X6Nl^l;=JP5vס"{KJ}{o;h;\u|e;W弭{}z;*{׫Gyx2ѫ݉"oCQ63CsͲo\wozc^snZ6͆sC^]3f;s}+Ofu҅uj\H:r:5^yB[{ƺz awf8DWkdm;;j*a7їOwZF}b.顶9bC$@92Pb+涾i4]ス"T{o1Ouv׼ nW}seKf*`Wp9Xe>y.Wآ}۹wepplǢ2fʭX݈%v} +GTiWC&S!4ل?$epi.s'J%:2AйowޢL) O>O-󜁕*,_s5eӻw?D쪨SUIUAT5W,0_0.7BI:k/I8Qu5Ty%6G#x""P$ESz!ӯ ?.цZn.'v4]貙uѷ_{?v @TĉuҪ,$D0 Rm0쟲OyK|#Q#Iăy%?|\X pp~~ʲȢ fZB>6L**6My/ p34H* w#7-ziERP _t=oŠz}z:K(?mcKزeHW3ztt2 )Di(6 &K,eB AnfEXҊȈ5l/a^^sp0t9DhsG.'8w]b%I*{z?k[Is˪~VF7ë׻HAyJc\AsOإħc+iǞ~.z?d9q镛NÙG.ӧfsl[[p؉(CQ  Ԍ4HNÚXRfs)rZ,opވտqt,cxsН;y4D{:dAZosRo{8AMK0ϡexE4_ձÒ5KKA?挹kR _T R,;m/q-í; }`%|Z4:ҥ͙{/pi\5uILwQtnpK^EUf;1YHK222#Xp.cA:[]69xYX#";j@R^lRO9qV;9iWJt`ƌ$IE '=Y*v@GQe(Z+*w,n>wlKAU+JP6fܽ&GJL\.ʔAHgaR\vvW7צȱ@ͷbG:8L{8V"jPxΣxo-X ҩzVBY =N r*/8󔕵}4w{㫸 4VdI1ʒ.3VJJPBqUQ EAIn)JE( DAŲh$"0!MVq9圈*QP#~׃ -[S)Y~^ݛ~fyBIQկ/-iKn+Oʪ ۮz$끌?,eq0-"aFCV鮘).n\J BJ Z-RG2:n AB 2 @xDy`DeKD [FҳŎ#/w]+wS\HNEVBJjU sΆVJmijwnݒ 9:8'W)79N9qwR3R #]҉jUm;Z,lX+R)KIV6;]^ȇwsmUj[ Z5h5Xwsqsw X8uQVƢˊնڍ4 Jԥ u4fdۃ=u]EDIDZ%;,>K ?N-ś2abOjPHOQ:C8xB,^V&H@}䆥k4v{d}+k@mʭ=9+e♒3!rc4I&FT^yI@5Yf[rWrL<' !?b{d^? riקF\QO2gӶ>DAl<&>] 5>xwYrc% Ftu=%zvuЂF&Hʚ.6^*Ug*t݆wm 3lkCUZ F g +mєc2TڅNC$X@M7l6#B Q!' ?Y'7"a}sI9#gAˣEw([KvK`mx#mΌ:ZkNSU8hwyH'=Rm\ho, ͷ)tc*,>vsVhr7{J"zEyBӍR<cW-L⬱"ȊSL4Jc,x?K`\7ŏ7h{.A 4UY*)`j #2GGenHd& ݫC{׹H;qq-Χd>uݤ;Kg75n/jϡu9R|:ܡ/ud:?bHC T,NZSIp7[S=W]pHVe4bMYfaQB(CJt eݽˍNs^dI :sb#npXKQU_^p>W]tw\fȚɈۮ-JsS+=o9ޖEOqK;E4 !Ht.fe֮.p\oߺ>װ|9ES.Idak0bDZZ x{eofۗ+$,X(\b[u5̽^zl%jL 1X+K \8)AfeDH5-lu gDu5݈0 1Qz[pDJRkc;4gpQeT\5vCAFk5ZQ(‹b@jRט4ň#\Sd+vi4l:u-lmcdl2DUԣD߮(-[d Q̩`7QT(#ÇknfҪZJF#06X7,pZ$ P :q+*[hX2 .@<<ާ.;yms*mj2(M~ov'[+Ru٘e;匲V֖-Aimm(تi4àfƨwtDGGpsK R2SA+~6CKPEd E$X)Udѫ5vP%)4e •]aL"3gsOOv~LG%HoOpn2G#]0^??J3v6C9HGҎʚC0ZqFҴUS&Ċ߇\?(L?ߍpON/,`[Ee; vc^T?@/:J]N_,XLJ@DÔtt'LKXro(J'|#?L=p``o}O6R+s'lϾ]j7g1TïuXca 36%@wQ 2}ٔ%-~}9W9Lhuz6Zhjr݆&}7V̘4P:`L3B,7! 
2B?-esozp>yeDW~c x_'-iMe] rO>k_ /OnB벊t!K\%O(km ;'{ A!;6(_hGJm7mbHKOlW;97·ծ<)TIE:`w6A &'ᪿo>eqĢhCT4Hp -ݾ븟VTxx3XQyyiJen.+^(+)D墻"CMRqKZX+5qH%9ѣqkmƊ׀N/USwZtm&FQ-dFQ1u>[ؽL&MZclzG=mXqNQza]UBVLrbcֹUURUUPz:d-YI)*R~>R-Je_.IRZI,_K*I%,Y,IR)*U#v1aUUJUBQfsihұRe *_uٳxz#϶Ƭ^MG }ػ/g>}uZ'|SQK=DDy " @jNOvpCR\W@֘Z$elPI30Ty9J%9b'˦U]ӠTUq!&h~E$D}S)1˺z*GW2SxW>oC;w(d*6IԧQChP ^ܙv[_iЧqbvQ_){Z15 %~WhPҤ^]~_iPYzqq9^2 u-ru~I!# e x-z"(D q6-=3|Q9J4q< ͙˛ƤK16ct#_/ɔIK)i%M$6RRXKi\JI6MJQUG,gjUTRRS0Z(QYvzm2*K&V++JLRmbtC$$!2VߚniL 骞uPX ~CR't>rqw*a1%J+=ɾ&ѷ=˄J6.=^ˍ0SJVW-BYD_=2y2:VXԞIW&:~ھ(-ܶ05I)I 7G ThojsRf:Is&2hw'S)ICh7R@狕gU!|8AŨMLvr"d(ӌ .iz$]21 Pv9H.9U {w]ŸS?BY$&Zh$:)ṅbhE}6?\;EE:`??NGbb`-ƄAD!LG?*.IXϚ+05 ͌uo?+sN*)'RWqJ^i8y"퓙`vj2rGX 2i'?Kvd0#r%H$ ԍC];vfwbuXq49n7#\_:{7>x~rWW'X6GCLa񗳄>{q;=(Ylk(Eѣ9-~Lܒnf܋5BJb|޿.*;ݿ&kӵ(вxKkfc}COS6孵xu~$]QxډIqzI)c#6eFb`ڋ}QPСII9V[XsIQEk!@D{OI0/]y~:ac7&)޿i uS&0 LIQQ^ptBaNae5  aO mTϷߥH1DzqJßmɋIT阗tT{O=ɾ΂Z\7*'ԒaS^9ynδ>^#Xrca~ 21z^;M Y&΅9ۆpY;_ 1nr5䟛bl >:x-1g+*FF"fƚ}ܳeLUzzk(<)^<)@OSI፩bUPU|s?MTF9)EpəЙ>0`^\ NLNb_^msGۏ;tmar-aQ^#җR_ ET^%69 Qa*t-:+tS9ԯ,+yB>t(uEk*D]g1Sk1LU{(^rpȿy8ϣcpXgczZJY lm\C'CGΨZ`(SOoJ (F_]vfK;lE (كo/$|WMaAS[Ԁ"B*܌cclŨ(-ҮUfD@ 3W;R5F?G6q:-H{3g lziָnN(2.v 2x8PxE'^dm!Gb(UDD9$Q5=}@P%6Vʌr|?kAi/U/%:"Q9ҿbP_7Okf0y.pxAbʒmصC=ι;ruN\SudT]j*" !:x5AAc?c/S)P!!nj=jrO]a.I (8}4hRV=B)$bHtz=5 -?ۯ‰?z28R33]fh?aSDz+r*7CxHF=Nx!2jJK76B0-W ^^XKsB'jνOLط"k1q֕1g7b.ҳ|J&egP $um9+g ?mćiw4/kT?ŅUR"Q(R"T%`K)%IoWh*TJG[))+⬦~hоj"52eAM(RH^]3x~4A#Qb:?Xˌa¿uSgNZkruqe]}t_z 3=ﮒ9G=:x C:ΏJ4[ۣ9fݝM (P졢]0 (Ty=6)8׳8¯# @{ 42:]A08._n2LLtɷTQh)Ejeo{gjEZ|vtI)wXMϪtɌߊ?(ѱvHt%[Ԑѻ}JH<4A,.]$$¿BK4:v\ ׍p)KR'ῳS!xeóg9wHHI$!"gD"^yab>wtQkW~u>S4tR(,YQ-,]Q|dڟɉ_8uY EZTeW="vj)1#6gBvmf˹jVjaVtŦTڗt7;E{>^o= J!|ܚHsgw iq 3h0T!Cz_a)_yck$*lm $@BhYtF^lAdvezûz= Tw)9TQf29HHEߑ e$&0yv"<;9ft\GyBLF Ӌ򍇗,4,8t5T"qxD;Meș،7TuiYZ g^ᇞ 8~l6'Dk__`)JP @R%t)NI.R;&sFwV*6\ʵ;F4dBP#mKE?Jj9YFj<A MJ9>&gov(Ga2L@6Èpp:fIE)@d('IտͦRR!OJJm,D'L=ȐSK1BiӇ849OC9LǬioOM|g:s ǚ΢Ќ@s\xNloy鐉9WoSSk[RSAI".zdP|)y]^'QH(Ul^;vgsx[_Nͪ6 SPQW4lKG S`Uc16!иm:TI U 7p㵼鹫 !}{.7p-g7=]PN7v}}5jpu=c٣\ h9 E,!73FmFCJ4]8:wF{S)Vi?ekdN3_8lC3ayf?Z Ψ58N͔cpHDu$!:em[y/D,-wz#9*P! #8{ hP")j],o|gyvgAǓPiRt vݵlgSoc7p;z0k?՘llb6 46h̔?tQ2w^oCAJf ̇;pЛه-j/||< ܺƅ=~_VDDT?'192N3bCff\G} No+~S!הsyM|>,k:\BIRƴ0R]aulN oq٭q2wt3)?% DR`oC (?9]q馸I#i8wmZ-ޔ)TP98y~oe''/kѭZjڪ~MvєyN֏ϗ :rGμߏs%2HW9iэqLM B8X?SŞ;>w+#'4R òvR?r^`Ͼj2 S~gf CCd~ d8sNHtWmu =qBf7Mޝ-u- p.DTEU_@@.ףfյAEdٝa 21V0`Vo[5>aԦ^0ix\usin?K'䃛+POy 뼍 JPo5禍忴o5FQh#"Nq9YR"]g'x2Q]֟ƱffR i Re3j?O:ח8VE8(srGme!#(g^OYDm؋Nz{/u0STu9J2߄:z*µˉ1:rFMw$Kojf 'RwSDz41o⛔sB*:CѲm]C[}y^3i15ƂL^S!*]ir*CT"_~??q=td:M월WNZ a4CzW|,?Qa-8J=C&8heT q)[ZAz!k&BmTR{fiVu~P0lEJ#VOo_dmߐU$X&q2jqf:SE}S Szog-<ݘvks1i]Kxe?$mwzCCT$3 \q0,,-VJs|yxd*k|9/m.j}pxjyX.j+T%SYf9~猶S̊,(Ϫ9ʋl^MݳDsHP`L˪&AFҡ{g)+r܍by0 U+bs$/# ˷J/uoJSU[)qBfFh0ta*Y/ەՙ%%d!qz\'[RѸ|m\71aB>u(I$IWu2L~WȓY݈w-2a%F\*" Y*/ ]v?BK?fomy\"aM5sНQʶ$3'Ig<=T]|DN㸜IvdFd }le2G囪yBa@l橦~ I-?|g^;uTRNwHtH:a50kx[8:NKW_I&/U 8*~D :>#{]ْb̦?M5Tԛny&lj(f3ӘKC& "dExo~`ᨦxyhDJM?T@暐zCS$BoP!"H[6GٯD<;Gjҧrzrֿ}iƔwp[y=IgZ N_OKGc=rAh}B'9sf'-VJpНZ8 QsFlSgNLN4Na=Y ˋ=;yc)=-$؋>^2EP *$዗O .rLDA}*E6Xtuç%7Ot{c#N!eCXTk!_O/;N!L? (} A q$/DAk:\$@zPMV3RĘӈD Ta̚ayΘ?l)?y̸iCh K+nfc,Y]m8kDyK)vQuK)&= o"f3ퟒP.(wvu:\f5겣N*J`|`)'ZBAʠ˼*dP9 6#hXX{ =F5YF(<dKvuC'yeInÊ՝qN*)SiLj.\,JL8G٘(J , ir ;'qݔFB.BFюq+&U ncQu! 
}ؓfb$ZȪKMPA}eq~]:g{ juWVo (\4FTǞ0#G鎵;gU7| j uÚ֌!EvBM9e +h:8CD% Ҷx'5[/O:Þ쟉G/TQcMo[+xgRs |mHcWCc}IFu%Aygosq6I>l*0b$(\K?WV' !=aaGG H o]gnΘc~ͧGrZk?LBhAJ>Xn0*HqC$3Ty^1;@uAbo($87dž5PhBBDr_G9EX0P7ni%$'?鏈Bo=;Jt/SҦ):?FXMPrWvjs(x's+SڬRO,h<ޑGه\9 "'(\tP5h1ɈJWOw"zJĒ$ɇ ,lSdo܇6\jw?"ǮΞWTРt}# GDØ'(>|+_;/HΏlaاB"*]G8!1_ӆ?5COIadyu'vŝ#1r a!- /ya kd<"OfAE$UHl8pfKF["٩E}1,7@;!XĢݳԾ ! *S9obUO)b OWLnkve9f7iS#ٯg LZ"(Db<ጊq&[2d!7.[!&ػ!Q6Ȥme al,r?Ѧ5h9ݼ!ӎWՖ|z9/GdnP:.r0R>t{RM ?NuT?wn΅dTҕe-;ȕMU+ڟB9d~leZ)Os;)_]ꀬƆl.DɎ* /^?X8v|ycs:6lgib?ƕLa^ŏ l/cu *l;36fدq@ cV66ذtp 7d놘Cgv,m nzNӣDu$xx8Y=I}W'*m"g2C]Ų>>NEۼMdrN Q6hm4z2mBy8ʷek9S!ze'hVg}?G?t LZTubb+eG@zgxu1#5YoVF66ּ9LUJE4,/(E$ɴ t>؞ew|'>}5"C}""8|yLvYh k:PRcZd'G8omn⇽D@Ȥi6ǐQ3M㮂,6'fXV?'o1-ޯKB= smsE4cELCg߻I~9S/(wEtlMY+#v#LzT]&vPM3okT7#erJ:" *D8DL2C1h4{c BoYC_w')MbXffcٜ-yi];;O53#lp@\FyO;ß>6k M. p" <'k,f$tbKK1 s8smbIv^7 46~phbdO sP"qQrtxS)qbƞĪ妵h7H<:LW1.G2;̔1dLOӞK"^NaSO)#TR&v;q A1HUdY5 -1GEdpǜc/ 7 SasZ>a¯gS3xCI!j*+>Uրˇ4EqOEOU DXo!&jЗTa-1:ۍ<m5MЁ]uj}V[ɊBoƢM0*b,銈87Dl 1+@u&XdRZ9Cfmo8mC{$ 9\`u9̎0HXZk W"I*iAt|8ip񚌹?ȸcԳM|Mm ( ݩ@&3pY3QO~۞'be!I#VI$<@:NDMb\UUk*_x-,sf0զy}ƆGm"LӎBYCcbWYlRж։eHQ\L`D.eBډZ*ThZRщYl:X%e+f+qp?۴ ~oV)av2F ~&in \"-DkX"QD)NVsDmBRƍ `팭XҕKdf`鲹F l-bJiiB -*nIk0)`Z jM+m ]k0㻉+";5 wCzkࣻ+D[j*#;kEs)B֍зY56 QAZZlPtFT ijХsD͈(TKr]1r: n64-ݝ1m{RXc*yI]j[mKXւ^^* K`sT֭ B#mrUVeAJ0 K`ڑIS-eJ*LỺPmnnY{q ÞDwW4~ 0U#Ik)35i4&״/f$ h*0'.<n$V|IUT?O~!bSuOã.WWgKiqw }yu<@U6ǕE( 4sN9נ9mb4nUxjyh 9TSǸ1S*{]u+c)QEvL{HP3ݪl?Ą05xyW6oQ+nϞd<$UCy[YznkБ5BrL|ܦI,;aw3a{UӝPΎBMeG/ V)z⩓v;QJA$I:Yh,59_9{ya?}/[R?$ Q͖$՜~"[~{KGXjlǿjMmPjOԮ{ +x(^))?4Ih 4}:o(tnTB<_ߗ_VIƹv$l AUYYEeB81m5*!V Вڊ&f}X֪֡ietOEQFR@tďz{{ǵWe2Aov9~N?|i{(? SlJ"*{O?ϵ2.'?F i1 dC<+-"L2 e1$官9t#l.pfك ڨm3vуG0Ɣ5wM (ŤAs'C3I w41nf鍙 ^c`Ce`4Ɇjy:@BB.o?#dwmML7y-;Ua.AGr%v41=C*VJMVC=4}$?|tHu/|gO1&,.=4d"m{Vw"s >iA5) 3-;:iI}tFIOb<'8QF'LE&4;8(e ;qMFBjCX̐S,CF]ʊz78M.qs4(q-iڣJc9,T6Sjf Ӥy)˸T&)K9K.-1곜k.$RDK2iK_I=jkUwaKr'dJ-R.]Z*@r͗n=YΠU8yqPj**dԻUi)(X"jBӼ*hFKBųU0je$Rv$Z"ABskњ7+Q:$jUǍ0 =WZY2o/ӿu_cD@9C{$ (|@7˘X5(S!ϤnYiLU##/%1bc>##z@4kw`W#cY+F̭Ӷzl.-:\[G8 ??4-4*)M)$7Z]yoot}[ﵾAyE $0$ɣ{cqsʸ,husDZki%:m`vql|AH~A,Iۯ{ fJBIݶҥ9C?/5$*U*UaG{0^jwӡڇ]b ;:ah뻫DȗP;*Q _2?X4/ zsN<>D9Ĕ}G)u)QAA[AhNT?$i H()=pf_9Ψ~כQ]}IjVELH&iݵrV4UUTiQBV%EU t[rx,I&FB(b DeipgDgay9©OFX*ƏCLmpNM5AIm8 ¢{r^Xw~z os!U"6<:T :ʦscCtUW_qj%ʡĈMq% &G0=}RxPHtS_dW#ݜNi(N+Vu?u?bP8K=w5'Fo{nݺ9rz'<{gBv"dN;yx[*6#!v$QڗVf6"MDf뉏Wq:ҠA6al&݃ɲ͌ ^o5s^]*/FDrM}.QMt_?G }ެH1Ө+>+f&"ApSw"nI^i r)~{!贤jQ5 O~>`]YlJ|SMIЯ:!hnjaY݆sNT )N1#- QO' r6Wc"ÛeVXi_qҴM=bݝgDTZRJ넓I9=%"?e:r<ϥ}&u[cƮRipN9|(ppQ65*Z1 k4#)R`Rl6S՜Z5=|MVOGb!4b\F4B,"ExK3ʅ1'0 vZO~5+KM2i׻w04E %r M4q̞,v2uAd!ϨzSt,(d/"3&(p ?˶"`v}3<(MA0FNF襋#\ 4p&3І1i DdM&9@vVvt@#M6~.nɦ"h`RofR [ldȗ,mQu9V3E>zmg-Ύ魴d4\Dd85yRA"ws%_9n&-.RsJ׊ڗVS_̄5f i,M'D[G43fa L1BY5Aʎu'8X56ȔNA5خmM+k37+)[sΊ_l-%F+誄euɭ[8̷&ʻэE; a+Xb%iy0zJ77 ! IέK[魴Kg~g.{4Jނ{cbF< (6i@#$˧px3ZTtP"Rx-à%Co뵠y~ݤrP,bLכSΆMիYh Y4{yYM[0(Ֆю:aJFIuin!qڠ7pg;8DJSa%08z}ppzqlkK +-57c=F_7I?" 
L٠E4k۠רU:ˤ9 j$텄-ʺIBA*b6dj8VV5zhv'ggKk_@RTD &I8/N%ܒ#w!5*O혧??(HflbƳHTѩ,qc59XN3S&c6(U8!@ 0Ƈ(qI7)FdCAZz $CQ-31(}'*'"1-AbCƑiI؈kY)JgYNMFV/4"Χ2`4Ss y19%i q=,޲T2e]^*5O/Q.8;30rMCe8QzEF*DS*sg> ` 0Lf{_@>7vG94j4y;qZ*>) oH 9.䝉!ogU7 L^鳒6q7(F0Pby8bAMdCr@:8aA?npP BNY@?M'ORllTh|3_}&҃J6svN$7v̟ziƲ8-4E뻩oRK>&ԿF7iOE R?8 oVii{9=ՔWAドdXk d-y[@Ƴ9}C7w3"L@G 2/ZR8!D"B( g5r!A (ac&?oAIL`Hq "Apەtb-gz41my`uäggqnԷ*6Tm(-)''EG kƱ|~\4eYLWIuuFI%_.f'[{eZK%u]ҺIWMtZJH#@$cJQiSEUTXb(Q7'8Q 1l`,)M:n{q)/8Q($SYmbJC Z)EKRKi),fY$[IRRWϞ U1KbUDIDXi1i' 8p)4)0 %0QX6VJҩReae.륖&1$1THii)U Tb1Rmk% $#P(Vb0%cJcT+c)Lc" &G# 0تm ZHN=8h`DŔJ4`)fQd0MuJP 0#Ċ4<#> ;;_l$8,h C ]%̭3o9ؓ( Z֒#'9ؒ&dcA:I2IDpy a o/rojVgF}E%W(kATQʸ%X2HXZyȇቘ+vċZ"e}} )=rL5_zįS]2>CE!̠$òͪA2;֍S盾WҴfnC0G 7ԙ}lfOd*LkVxuWߠi?fhvp(H:RGp}@͞}nf#qа1I$ok+&ZT8 {j~)q07~tяCIxu*5"Q vDD2LZ,p*B%$$#%Dlbԓ7E3Pbd7.47F`QP88v}\#*Ɣ$!RRNԞ*a1-B3JG{Z#$T)9 &fh7+ˎe0Bx8ܭ!C ,@3udof%J`x3^WP] @60aIw& IZP*L~wF,kN0C%Nl7{ nfs3=>ߞ;~ssRQC;Ve}I 1Lqq6C\BzMVPR '8m.{Uߟ21QٹXh!z8_Jqvc3{ckݻ<JVU wʍߢ[l%hQO~[⣡p"Aj_'f!Qu׈-`ݱh;`0h3l0ksv`G* I: 9q(P @8V7;@.9^LkfXx~!( !<&?3PYPX6fTFI+js3%Be>_DOڨڼV(e1b**~sBތT92gs2Ƥx RԵk{]&;iޡPRBDwW=O"ea@HjV!33ZR^3դo)7;~lj@ Ah{z"C#?)NR\c1SVXvԫB6n~n퀙Ù9P٣ cč_/^ Nڙ!0\ig%Y$Q {P5.q>UmdʏMd;7ŷ7CT8`}鿧2UT܌s]sѨ$ 8"c@Y1M0Ofd[+m P>N5%3^bSiyûa3HwN9N7S!9@8C -2@., :m_ M%grUwc)^1xlHP}njH 0Vf1? ҍ=]7i_rþ`ZɃTak9'سh0TQ_!{$b1AȆ| GLkj(kiX: ga0',Nod2H/j3`8ޢiCOOzqrݣ)4|ޏl7و;VWDObqMP m:&A%#e:Gzm|]>ᎽL@8].tˎlhj :GXW 60(XԽC}-`9;^O hD;y§)=iXZһUfؽ&;9iOG.[#- 9v5$Y.l8 KOC<(jڃS3U/j04V*bNYlU!$v8+o\7tyY_xIbsյס{'.zB`z6/ b83NCNN@1P("EM4IOsc]G`l<??F 44$ %n+"j)j4pfK JH$V})j М;#"4ČWSޘrK/nKMr.!rFj.0"Puo]l*[z%M!mی)n96G`]η-($989ѤB%̄ TBe#+,'(#o|_bdGzɟJ;M؞F?gWNpp-0>nx)3W/}'>БͩLBSR#{M(")rJ,TwN,)WΝvôDe"v$с~4O!Mܰs{p+ g'EBLQApd'u`;G-i,pŒ=8tFDV9Ki$+**ʲ>C|lLExGa &1)o0((38Cj6Lf|~4kPp4$}-ՋWTy!.D %D>v<](#OmLXR 5@OEaB%b#=;Y#A5W^HqvJ5z3I›Y@f:( (mDzʃo͎J5 N^yul:m`" l)n% {Zuf'`4[kY~]y1T6TjQ<@w &ֿG#Z&HD`8WxٟKi 4IibUH` 5_~dwk ]P<[1+t谿(Pֆ719QI/B!B'ؙX5-luh'n:h{@t8$FdzGqh 4wꃆO ?#h[28IJPA\9d$t(!9Ysϻwg:]箓w/Tu(}>jM؆jS$K99BM!(:)'nDG.jN$JMkC,U'J7@>f +YvmAfݙi J*+l#fɌs & 2- 4ZPЪ->θvC>kQ1TDE"E ` `(%"d$K2T!YqI詴*!"'{C0sIgA'cA < 9h qN/Ә4lGM՚f&\ jёYήUQGӾ5}Bx&΃ K(T}-#<{LR3JΣ'"431GH4Cڛ cQncܸL!_M"0jaCn:}c [LHUC g*SA0Ӏﺬ7P w6°Z@[*ċ]DK1Ӥa4M1:8 ve *6۠׉游kst>|z~r&2T\l QӞ5x2~gjfE;A`9A#@u<Z""A9Eխj~ΌKOβC\"N5䚇fKw>@tIABylyEbi  z5^CSMt֚IlŻw)<^$L\;}DdϪ/ֲaeQ bJ~O'fWa5N6c«qa1XntXj*YI,τn?^Hi $L<Q+&08?lD2C!~CWDrਦ[\YY}fI9re:PzjiyPM$shɶ>y{ ۠kb t[ ݬ!;JT9ED!ށ0'\Ҙa"GE'™AEIom:ogVy#!9&M@dT$fڛ>b! ~E5(ˇ/:$ EE$$$$$%+yZ5TS/5pEF30dQ!HvQp}q]yxjEẒCNB3r5dQ֞c_1&d(Z45lDHF 1("KDBAv@9>p> t Hjb5>p5t~mJÑA{j I1l+/\^^پ`0Ə-c"47v¢T0}mi|ksěL@ Ffdʚ/O]Cn{$ 2Uij)ۄRS ĤJ;m2֛'vŽ_AAwU9THS 5THI!+\?s~#H2rIwD0^M1NH悂?S %7l} U98JO"Irv My`s uD=EYeEi fX+wwe{4B(ޣk*,Ddڃ*ĶF+R2xDMs_mCR[DX7M:'Xx[8Yl'3 V? bWhdG6h/'WyߨSQ喾d-tG!`MãpTW^UiN/bKɛWX,S668=Mq&UHKɳ@Gښ%;i($(#ZM}mzҨӛn*er/VI+sw !(ŢYsȃA~Q&AjnEq؆(c32~KD4T8~=_&i DȀ>+b,q:% ZR0uĪ,Q [4 B?Y\Hʭt6#&!Ùj䄚1`f*ɢMHخ1uMT$ HqePQTRE\D\&ĉl|yO < UAO;~`pMA3 eN9V_y8C8|"z![L6F,:?q%1%iYBMr+>MV:EqDD*w1 tʎЃT{mDh`θwn+R(Я4¸v.PoZ*^񟵰*`sp_]966NOSm$8!  
G{l ;yO8}_/J}kFwR~$o6c9sy%VMG-Cg55b " (_âw 4yJH-_}MWD:M뤫ΪJJ BKP@g4ݦHf+꜍}&@lFST!v>LoSxHrsZq GsaeٛZ)61iMrAc%TbX+D:[Ҁ0 r5Q'ʵIF`7aHff*Wq"L)`_hSu9:dUR,@$4An(YX<ȘJ[a{(& E 0]H`jjjzצ۪O߃TB|%zH 2;>PLC?(;SC8" y8%Ӑ蕝 <85tl&MHr Q!NRcyozg{`utK&qF<N/)$1,D eDZ?:CLeꊗX Br-"%ǧg/ t~4<6}bwX/Xrt @P^+y,Mŏ qC)%\i!؉ fS/F)\#Gm@Eiuq xFm)2(QUFj4q6H ,~{}i^6sGmFDT,)נ\"X9ZFȦ7}eЖ$ u_-dla sUR,LBI'D gsgfMۮmhO/aH@!z8zgl5-(R*-+WmBϷݜ]"% Hf:m1JBó0,+NNDqUxeVI 0Qқ%zb|^sLRŏAOclwÓfrTr\{[}3wmprJF5ElW;5r;lG[OSJ $Ѹ4l6)_( cb&0maCYaB<0~$2'D?K Dz $#ìs?#4E/"\Tl-$TDq{F?z;mI|z$$$$Y9̍$4B T}fZxysp+ %5L e@dH*4$ dR9QYYYĠ ,$j9}FzMExQ^R5c%nhٓ"8;;Mk4K H 1)-3*iFEFe%SaI Pffd"aqY# qYqVPbj VDaISAI̍xZVbfD̺$ ,39d'A*+ sDTXh \VRr: VRH'DIbI;P$8;vI3E Di@H-9Jh %ņQ8hG0@DHe$"cr %W5kyb漘j5$\6VF] ViM!yQfz|&-0/w}5gKkRS_Ft%I"Ф|`ڼ^7|ϗxtS2̮KaEkOxy}~eͿs?urdw T.h[,Aԧq}1-:nӑ7 -c8Yd\/j$r [w++G_3!zbyu;*A H!+ CTDyT.H aڍ @m1"i0&$Ȭ[{J)$̂(bh ;DȢS5EBm0DzYJ[ùSW,BS|l#'wu(frƜ)"ųv֭cɇQmr"9@84z4caqxh6:hk"&eEk4 MuV)өXs͓ABxo˒5kD2jȴs%+A4Q^7{J-g4.2-K_Dʢʂ*p*uRUPDpg@MH1ȍ7Js"fՎ;Y-Y341G YE1:y'jzvHF%ڞ0B@3!pa٫IZ?"ـ00   9cɯ0an!3p-u]/K d2{"[G0781$~@vch^|Xm˶%wfᖦ("'%}FЫE X Џ Q^q΁ A$'%$H.ȑ(Ac$w+/ fMG,jΙ7:oFis5q}p|p:bC뙌1$ VCRV5nj)BLq NbۿHzufr똅rga;k`uD*)dOJ: Rq-#P ݠi2) Nrw;?~X 'b9bq=}67-5홺{=c4&vԟe9#_oͳMج;Dn5]e#l}FHS潄{}WEeǼĬR)`Y&fB|M45$Lc1!YwGF4m 1ƹlDp'~\@*;F;,31G94 0MdxMg?ub0z{ыhh9q(bz@ޔj&]G)p4YAƾFת'%D'Iʌh219[6}dbb1ܺY;\,jB>[:!nڐQڍ `BP-Q𮸞eDY rH@VBc#~osv< dcfw%a5wG$}اy om !wdId῅,ظ>gC $ NW/z@l:q6Y2e8w6AUfqjhЍ-r2I pz4l7K5hT1ܦlE`0l1ρ`"EzK ~D RZWf6 LDŖq fIg,Cx92qX9NY@ Fȇ3ݜ XR6JΨ !g{YA59w _uL0/.dwzBQPRE2vJj-,#&G9Ӂp޿ f3@GƑ[ٟقiى95XXѤh:x]Gf 8[zDZqƙ2Һ!x(FLع^5J,Fkږ%4upĀiiBEH UFVNPV茐#X9FoҜ0L{fi𳤬cA-ZoZl.z^a6Ȱ۳'̀IV`Xh6JDiZ4J]/W zxj Od%zNkOQ |Vv>Y`3NڜT,t㱢xFۅ8"93,NF 2 v⡨JC$dpܤ@Zt' HƝ3 g #I t&X@wm(Bk+d'$;⪬<B*>`EOj:ok$$eYSk0t˕ NSmD_Bwzkf|ŵ9;eH yE܁+bDc (nÜa͑ 4@ (t5A$rnd7M{Չ@Ǭc5܅fˇϰQn{ >^Zڇ㗰@ /,U7INT֜癦>64r6r1aZʋUPЩ#}|td5*5b"`/RBJ1;j(WtfeݱIx:'8m݃E34!N1e54*챪YLyIJDY>'cy<͎TQC1Hb>QdLd=LM'9?{ N7s<!&v6x0 N"c$A|>#mT8vL2^bQ2Z:;\RPŒғ$޿fVB>oۇ Ӵrɕ6Y2v4C8FD OmM` X"9-hRqc'j]HE%eHF%yh|lBEףQZJ&fB̚d&=U%6_}q 0\*y6tK~4a=1ȭ*wzws _O\KN]9egxG6:/(mE\IUߜW{7XQl)9tVop@: ]imuuBS݋}2Yf{e n(]9 ^ehZ"ۉʹ^jn-fBeq ̧,vl;DF t SE?XƲieFD3yN![ H b݇99nߨbdŜeWr)YdrU%ڒ [?#pRx ҿC~ul ydSb\d{6>ȗGB}qUb%n^g! t#3dM OVF={JTO_ݟJe%I>.7U6y k, э69">gQq?#y5z/Yf.}lO?ggopַD%#G|4ey:y"0XFKy3!W aϠ<5IY6j2J&CREQ_܈zGMno5'vI^vm#&(_ۑ~SY 3:Hf39#& Q_D<="*GAW׫s{19) sn%'>c !E1Ł"MyxrcesyyI1޺P +ȝ@E5q@}jx8:ȵkV,.i[WIN6]Om}&ڍjLЊH,Fayנ2 K{ r i4Ybol]p0 d!#(SHRi0Akj$ tv@h!@H4M}?1%>Ju|33fJ2)Dzޢv+,J"_"y -v8Cw_Ɲ! :iKWQĠD.}OT#N4o7o)5))wbha">,ձq0as T:]fF򑨟"DPp娥8L!&@ ̘Ғ#Aa0"bJ 0I krEx9B7vK&-/Ǝq3I8PW߯ $,o~q;!S$@,I_` Лa` ͼx2 )hDI`}K5,F_y&gF$}25*n-D~ BQhQ//}F {ȥl٨Y"쳪XF+d08&J`D]X\4rvCkaVVYc=-,Up’h4wr+~j="=}#;=i`:6vi,B(8`]Sx sN2Ha7fݲ|汆,gSk5g;}|OXmb;~=z]eM4}q#9ƌ'_H2zb$b̒DFwtFDG[W7娣=$j w,@vIνWNo(/]4,0EM,D^\BXN=an|]׎ձ6P뾌sQg#hN0 Y1 u .MdEwc؇oRO(6W7|Rq]k17-Ѣk: EaUxi#ݫnFˍ=sx{,]yWHɇOBME1/jlYpv !-'o*w4jGsPcGs&#~l,=N<J쎹+I|[6Lh^ 磵N 1,P:;ɔǠ\F\]1]fKg\SKzwB;Ow0zsǘ۞K!%4`݆xg*GpQ6ON7Fΐ*0UtvLuݱ߈0ISxU垹qO Y5e+$RF}|zappp/Nf(pd3GPƌK@ a&{ğEhe]nzf z74:y}nz"@1D Eg8]q"  L%"\?@v&"p0/)1h"0P)# Pˎ6ReDl >v7㢼̒=y>eM iU}2TGVw]eEOIŰ]x>ߨI,F~v68Ѣ;S;52~K]籦ҐUoͦcwL,&PNy+A <K.bHP$.gG2aE2s}(N`:8*q.-Z,6W Е,E-uj4[ڎ,Aӌ}Fr|L",cߨ!e F[㶼)7Zh 㩴~F>BQ:KXP4_2bnTʚ}Q\t͇٫]Rk0[0Od 9dEA-(tl! 
(rtTLrYke& <ܙ\ͮ#u{)xU*WޝX6Z,A[_ɄeL/);-)h!1أOÝr;7c y#w# ZE7Gu\+tb[ aqxژq-N&l^U$z!& sIשw | tWi{Vhmd 1%zӗۄ9I25Q~VÈlܜ1?INÓE)l҇qm:NÓp#AS903_< iJ 4&^\if BSQPcGaE?;_V?JjG.Q)dC4a~6oA?M&dJ'+ n>LW'4u(L*BZOO)#NpwDV)G&yx0`6)BXW^x?OGm)eu:d0`_3hXQ#Th6F9EQkF%TR-gxyUV_N=٭AI5K'l2JSF$gOIX#=癄 aq1E*&f&9G-9'?Z5ĥ^L)m]QOe/unqX~ꡉjS1ĨX0hG@D)+!p3˳⠦#˼or%?V&t UM| svg>; dђA$OtT@'.4҅t7vpIAk q);Ӹ$`Hx  aVq48Fh`ᨴ'NI]aT0Z!9 GQX"JrZ`e h 9bY4H >CSDR"PF4bRC9uڠVX5TK9K,rҝ;`lQ ǂ/q(-;лM2~-!NRTcbXm.®齆Ro4ƉRK (!yTFeKcϔsI?<SIϒxfn{B5Vc ?!C`BvW#.)r6f >'{ۇϼhfGdj&xm)@c-~c%U2;ۘs>}de~^[fhӦ.Fu& QqgԂ%y\SKc?.a_pm T20=Jv%+&\A;`EX2U>x-(x$YlYwMHZy^{ĹVA_{"vwK}8B j$G!L<{#Kܸ>w}m3Yc]b`EyJ܏(ȡ5e GUytbk X'xEtNǟ.յq\K rv$aG͈h񠝠 gӦ65_OMF2[ EP5KQͲT:є )7))p=y38v"DbC&.Ty5zL981l&ߔxQ==43Q+|VACB̂?K͈(A4hHۖllrz9}Xh3dFm9?RjL )]`^ymfw2f(Ehq]gʢr%":% @|%YaY 8& 0 Ϳ5Rt5_U ֏.G"ohsyz1~V,!l9.JQ ϶:dS~Ipk Z'_; yZywmʆF~gF0ӌN@#K؂<<-&x@D&u vʃbJ[CoЇY_ 2 E! @P{5m&s)$4& -~7"[L$@eDh@&S&ko mܾXv&'}fR`o--, n fа͵Yo07I>V NfyQ'MjgRGPfiYG6&0)"ۯ֭EVk}߁ >)Hm{ݫ o2L@1)t+ϚI4S({PCmd1 b J0`?i:b4"C%E˜L$62+m a>πFfя%~w]1 )7hLݝ,ah Z:%B, ҂DOb\}7 &07Qu6?)&'Iy1H" fAM1١?߂a`DCd!A,I-c0\ciRR0 Q`$+FѴpȑ`17FCbr˵u*yVt:my9Pޜ.),ƞz8s4P\A%BvE 6as CȄYR{-S JݐlLIRk[|(gy .0JJtwOAkoiyU4LQԠtऌ-aJ&1UJ&)%unlJK&EIWRUUcU(UF1Turm.QeWI1"Uc#T*ª%I%1l%[e,ZJŒh]tJI%]JZJMY,]+VRZR],uՌU1JS%tY%+%& P-e8)J9(Ts q8<ݝq Kny(C(mߺ "ȀOH9  FBr2c2܀ c;\];΀\ˮn;8 $wwuuw]U]:0p@wt@rpSwm'Y!w wp$$nۮeЦruR䋻t n2Z*j]9讜ΰVp\@ AڹtDsjn㻈#!Mݘ]:;+mlkQ-,ėwQEF\G :;wqwUٝQn*q˒qĢ-sB!.rW16ݹh6 IL;]s#tsr].clmMQsUc+(ms] Rp˗;wФeb[DKZJ"`TBjdiJl+BX)[UH [++%-2j…ۍUڮkm49]Պ\bP,ecl* lKF-YFѭӝ .wmջnua.uۮ\).:r]tfRN;\Q[aBh,;SHPuvnK nguhJ,N21̥s;A*U(hf 3B/U @†C&$C? S%̐JG"dq$Z=SlVq]VN SZy}Tƶ)zt.."yi4w;0,0`3 !Lz7-xR~L'CJq󑥚Apl@KpFJ|S9ujּGs*e2;MYR-V) |{A#,qx!Ar^V?_F&Y:eqNE'DqQbI*[5bJD"L`튤wWWDHJD0qZ RRLݖo.0J4`gLɆM2MOQKTLxI-[br$@BF䨕~ @2Z?;)^Va֒ғbÿp{v;Ml`!oSf(Ѕ4 !qMԚP_)ݲCGO6ƘN&2i7d#VtٻMOog)IOÑHJ| P1w#:0LXa#>Gbtm.(iW2f#7}`̰p8 ǀzmP< <' &to"t %ǩ*t(` v7C(.Mwt}' ܙ6q6Z s0vy vtsnS'"Qs> U'r %3$9;7F<;?C[,KW=;#zzk'^x:;>GD'{‹G0Pʹ3XvU*xcdq^,/uv|#5Jk!e;ыy9Wky˩8C֊[eﶼknKn;i}:g1 i,;a|M)i'>퉽l# k!F$wNZA+u}~y}}ϝ|%(/N~S9((ci@ezǸvHh:?;"SR# r@C-m2,U)Kb_?ogyX o .j)w 9k9]Fڋr&xgiO'$XF&EUE.Uɬ5+ RR%&ȱK#j/BS|{Nx;S$Y!{~'c6%EL)*}]{#-šq wta`?N!I'{1vNz=B#e)Afz)i.M9 20Sche=gz:1[>SxtIg]$мɣ<UA>M[$,jLTPA5<(I$7yˀȹIʙ.8$06Wǝ(d!̘imݶޒ eqh?C)g9ϔs.-V6_QTDVe)4l5HBH=BfyCT2fD$LJ`dDvG0*ovˡqqng3ˢzR16I]*$kIZv}X΃ RfmؿW08ƢnPjyI{Wp&&C8 " x/&گ]䟥)Nv!3*S59ZQ!:Se๯!F9K;\mD7CH $I+M&`j%aˬAj#&֠s1b$$Qy<;CE'Z{3dzQ6~i$$ t`ҙkE ފ(L3?Jy5`<x yw3uD&wp7 1™N^G1<<>SUTќ=7 } $đuN؜/"R/7ƳZ9pj׾+rJ1i#$ӳȱe@AiնHm(j@Wb6CtJrڞ˻)DJסࠋmkG0hX83z\ ;aK#(>.'>cB=itpN!FLKʍ;<5<ra)$Gcir'*Cy\@8y ^l`F9 {Ptϥ?/UUWlKUW%ܭ&A|ٛ .a1ts|HP}iwl$ƠwXWI!BI$I$%b IF&x!3;\o"5u6H8Xu;ߑm-l:&4>/b C>DD$x!I_1jL0&Ώ3+iӠ/oKU(7e= 9Uk8%U?;P"}d:??O~fYK̶ OgP<G ~~8bhr2MPMi-b`z\gyԓ"e$G7tI/$GPzhlc`eՇ\B "doB ,XԔ̶2jmdB@7uLhoz E#PC|:ʙ+E'YO=b~f&Y"}_*|®iz$6 `Hɠx_tm)/t c=u,lZl ܪU0n=nW6KL!c΢f)(OLT\ &LLQ:jʱҥrv2ʖz{˙}߶MQ<Kfyo5[-,ef)FznT޹yxuɔ`lW8D,mb=g51_8^''%-ۊ9u^_RDyb[/x%Q.0[9y5=[8ly4 QHE KDHP$w#?G/4suQHqPL <2;:/sZf8]k\׬Q^i:N¾sU:U7꟧oGbT {w[+G|Ong3|_˧tKy(:7>[,ur/Oo\λ8bCO?Rn%_3w,%N5O<)R*>wzuR6V*`T~7ׯ%7ff{م鈲Sfpni֨p}<~ z8<5aNi~oM6 NR@'lͿ]z(zqqnQ ,4DzΔ芅O3L!yv"zoӣ8CU$J6]r]Txߢ{'Ivr_d獔Sars2wӿfձHQZ}q-VngI&~b3__^R(Zd*?XUu_QϱӳWX]Z=Wǘ+t@M6*,ЛA{l7R_g'}^/M[ycoFܼhA:>-E/'+Յ]Wz)Mb)*v.`P@c/]=5b,Nǒ!峑>$kFK=q=5D8X%WΰjeEw IoE}?ևBܱFDa鏆7KmN{9:mtwQ ~ln w]/gv{kT}-^]=Xw $v]?}??t/Gʜ]cVjt}+_MBt֨sb->S 3Ѕ$ # Rԓiŧ}\;9OU` 2xc8 [1MZc]2'$pWuT-_1)6JڳNҴ{$4MZ|{PZŵ]C^E 5aە "ܾ]ˆ*7v{Od4&îc9 Bp_% Md1w0ݟm&y(OAtڣU {!xuu ibR74 vH߭:=xBi~lD7Rcr8Pk%2N5^,vJYYh5>#gV Cv-0:<Ywt_s2rs|0~8Z@#sЗiWL߰u~9YY f2&fq?iWo 658Ć`U*):$ɏCH4Ͷ'Gnvb&}̎°$i>\{|ÜTj>nײ_QMq\7nWNubj 
ՖI&lQ6ی=!(Y|NbFzřc$=>½_ ]Ck1kW\QoX+"ȷ $)3>@zz}<7]]C.tvGo}niRDpsL4YJc? ʪ-㷫a*(=VWQ;lzkcyTyOOL%ƅ{6]d٥%yvgxܳ~7}N=kSMo5+MGY(SB8nVfs]Xtv.y}z\.jKM}Mn=*kt[ٲ:/{ƶx3T&<gS2y[Y [L\!yDsِjꇯG5F#z:͐։R?K}9<Hib3FG̼r%["QM}jӣ[uxYƄ!ɭ<;c<%ܻhTmo,6ϴ.7kݲ0h›Kmv[ӧ8F7jyY(Ud{7nkV^|n> 7D[Bzɘ$4Қwe",*~E|kLxo">}vUUS&'?[nhqMB1)e˜Sπ(˟1QzA%Y^{)m~8F3#ɳTyY<<]G["Wڨ5&Bem$^VL"ɜ#*1_-!pSF]2zZ/z.I@r4 ݎoTQ䄖 @muKvQhŚe F_Ma =6B#7+-r0\xQ SX$QvhZTI~-f'T[as'9 0: 7~PWDB2׮F:$ ՍpJ!(m;B:zi09m:s@mE- 0EQe[a@;A΃nXGe ͢Ka I_Q܉O^%rI(>쒇|&"PHG@#Q6z}'F'L"f<d" >~:ɒ}j:.&'8 #@h#Ni;mH~dF0]0o5?f* M Y;Wu]F`\į*_M~F,l͕8-lՌOz;n+8q5;O.g|;zX3ܫz'Z4>[UrPaBm#4K);g-Bci#H;+ֺa vcǷ> avwm鲦[:eFETtl$tL_WڪRPuqg+T:=|= JQkjZVlv`lb̕YB7ܞPңOdMhK_wrh]~?%ݬϷUvoEܣǪ{ՅuUp'3-|N1D1 8oָv2uܥ ԐŵoE3tMi)sǟN&OhTgsx(]ׅ 'U&vmE}OUA[-ן\qJݩ {5UFOCsY[S]x}7SN^_:+-ByXv/ʨ5Qa5DYail.nj:xkZu; 6g27ON}2OQ}v[X2"\<<Ο 5HE]+Qcc^pӳs-Tl|eۨb')f2ކ &88R^A? 25I[M1+cTTTi뮔LcY,hء)jYd2AlT\gI'w.IH[hcd+κif3C4b*bTS4AXiۛW)V-iͩ+$LRj[%MɘYoY4JHɥKL"JldTh4IEe$ȓi,J35Z62JI&S"$Y!e%J_uڍ"c4DԩL%!JJ[L+i4KM'κYYi%VRc#2FTdHƱVE f"&SRKbBL&[,&VRȤyZI%Y"LԂlkkʹ"LF&Q%LRYHRiҔi1Z[ բH*fu]LU&ߕrr!"Bsi( [İUd&j{h,,dI,$[%(EDK)EBU+ݭKLDiȍ4Ho5q6%7wbS(L5H))Mi Ze%10HC^֋B6&()6M)FQDfBH6K!e4mi"L pUx!HqC6C?__@?);R-45w,VrDHu|MI")Lfb N:tݯqcb `Q`]~? |H|wTDRD+DOO)okvi'B'S&ihLK6ڍYҺa0ᕭHA8P$DI7Pj6j.jMkvRjVJܶYI#UF!D^$C_2bIw 5Zvе2ى_n)KT[}SA]bI2.%0Fae U)JAARR@ZPA $"MUB\Ef0eUZ[eYD^ BqZxoq?p~0uZEt03'e?y˟Ν׽G<?ӵVW@(멇CyiPiP Q X4Efٹ2K)nm:sgT~Uv#gaɌ%*b{CU!-HKwWdV>OHmu1H?[QĄQ:v'??a/ #GwQSJ*eM~a^|[h2G|Ie #~fnnW)EJ$ 4CnU2=QjkgײX_d)[(Zqr, Ѩ@`hpWaa7}*[d֢κ%t;:V<[Vy)?iw[׉_-l73* 4Q 2c>o>UC)QPŊM|eWX\5'Gc~x9Î0WW%9վ -#UFuOVscH= ǀ2ݹg|> 3?Ft{sM HC]H$s(n6[JN.=44dspMJ4,q Րgle$XVf#ѣ!KHKA6su4$^imF`g528_q:®r)G{S$~kJܯGpdJDO7GNOõF~~Ex`Pwe%U弧Kp+7KY1NIJG~45#T2-Ho=s"yT-$)}7TI$[ ԘKA2un29ݣ7Dr7f'QHRnsArGYʃUB`?KI$1dˍᐌ|Tm jI"35Y2%r%%N݌st+$r'ڦR$=[BL}DYuNA^e2KN5.)ġjJ~aVtiyUz:%b,E/9HlD2ww)c:a7;sn&! 6Onݤqj0:䝉6nqɄ1h[HO}Amp#iGW_ѨBH$+ALs)I: \09SY`@;' ~v ofzŸ< Ŏ鞐\_Q`+UT L|o"1h{23nChݐx-$即iCdTYttms!OW>w>;2 X'ȷIT t$vĭ.rWVuՈt~z P׼["_].%a34Z**$8sZ-<80Lm ]EM:.lF (yGY=ZL$TNfLۯD\I qgljD9ɓ%@y`\v~tIԟ'4!oI>'Q4J-. {2zeIJ iʟ)q0P#KE6O1Y7c/Knw'At@ <3S͗Ap`l >"y g\2O {'8)|=L&p&0ҥժ6 eS2U:3zIT`+MH侰j_92`"f01S/2M]7|iR V1p`Jpdy#Uhn*f1:Dd+5o{|5XjiEuwxS:]J½'4f"*hۣ ?Q;m5®b>1 z/_C×ydI #-J}mnB*` $JGmWDH?C?tfN~cX~2[}=&DX2 (4?u>=T&=_!&Z#4koOCC V&!?֒`fy8SʅAzىI}Qj'hf) }y28!gEĠI- Ft|u*-p͜I$֓aXċس`^h|%c;d!@ݷrjr~׻eNT ͼUdΆLC}{YGe5EP"15˯R%jsgZ|O5m,n\,Aƫ?DyGbbΎz *0]+p?lfWR樆xw9%><񯪋먒!-ӏ^8>hHL.!MYq9NԵQimM:>1)Gs?T_ƂJJspiE+d O'̴cmΪrYSoſzG5޻:|b=/-qOӾ5NߛeF?4Hd/, RJ}Z",wfl>g5(U7PmЏvߕrT;otmg+y^jݜ*:e ++^j-XۧHM.V6dz%{ }Pզ}< עx ITږT+{dBDnXz36 Y=uv|l5G>I(Q£*<<+nh̘# ЊBO,^3jgQg]5G6:/&v*]wmFT.T#G<>8+k _$|E*p*C.4"e㇢"<`n(kM~sȾA c+bca v~-!#i'}v^CWWӑƸOnG8ȞTsl5鎛,$JB(xkZ$1Zjבּڍ@vx坙b!OXU[ݠ8⾭r!1Jvfj?Ԫ0Gu5 h{\`OK%#Ydd^`iNmj/m}?sQ1u$/龷XV_xAQnw|i#hWl:ypw3.AVpi:}NS`n\kfv9#㫦x䍝?ӸvA-3ѱPn~YUOO&F'U>aҜO-W1֯ܦs}\]1ߥ\3o w}}kou}|Yߺ{ 5wl~kVM [[nZ]z7׫:ta~jΩa 몇bQhV 0ٖSx,ky-kj*,>JԽջd;-VeS(:#Ui}K:[b6wgfz9<]qzoGtr_UU-H=fn+=,.՜1Wn\3zW#b9r֟HQo eaeuV[2}tf.i渃<;ezM9b#$vYE'헋>#G+1=o7-^P-fbO<>]ݧ hعTvGEOj9Ӝ#h6gjIߋw OUܦdz%Nvи^ș0eho?q+,:z>d!ڵnzg??WO aCbbյ&Mx.a5K䘇yO3ȟ<C ȌmWF[C#E4G?7L3Ύs<} cqϿq0oAUƪ? o"g:tR[l<щ=;LLt_3.} ++>jCq0m Z饬Y b t֚.B.Hr0:IsEvx  $g;#/Ң5,l>/O ~f,}9.LūVE.F/@`kU[Gnͽ>i5~iLX/ ~5P|wHyv"B'#ႮExX?\ >$\'gח%TSTM.B RT_Rq\{L}@KM‚à;Rr "Q@F'Ry@zt`RӖ>Sj<,P:{׬8–RKEEC "B–yN txP5bnt^ *(ᅦe71,[cyG{E ^b?ܿ:y^_WdlՊ"gλW|+5/Qܵ)K%.;=g3[B  E<+̙+<5C*,YYv>8:rzaYK{-40! 33.7{ m|B??zOУ/D)*p<5Oɪ2xEc0S~ H~ xxeP/dzx.Awrڪ8i'=brAST  fK!<9-ɡa9vtghhz26ZMRѺKIRu9kٞSQ*=A 5)5B A}0;O:Ƿ>NAx t߭E` QʚPuęHBDU7D?hdGG]M=w+x1 $寤P|#NߝW{yfED2qZOq\Ml;kfCUp=!.nǪ?T>?ڒ𓊫Nffp.$hmPZʱlsl!qife" ĝdE].qD R9%ngt;Ѐ,;bzL:uneAґdL1rKP{7fѩOC@v! 
Nh8{OL;-rwyHt<8GNdDimfY`^GasA# %5v޳HG:`33P1r|]{&p*4y7ֵ38)c)YX̑y))%D*N۽Hz0(5zsޜB۞Ձ$ k3Kn?tCqIA͒&z`ȈI)FxAUgfbZXS:ǻ)'L{{ rͽcе3>Uz4|E Q:^F$@0{GC ]C S?1F=WrтׯLW"aZ7x9Mn{ss쇣2ty NTKCj҆ub})ajL o!٘l:ҾD;%>璉(C::0,$[2y*KYY|tp뢿M1vz|=[<#Dzv7{9;L a'3n;gduf5uyIIs@6 F֗XO؞5-a!R紃RN^(4E}6 7$6jP %۞vώr㈠R"d>{|0Uܩ;p hl1P@>}B3$DWNuDxBIq?'-U߆@+p!?2d*a_1J_st*COsya*]^[+"eY}ŏZ hE~+c*, d7?fǏ "q D/NZӾrJ`?Cm?q=i2:cQLTKJ[,SjR(;3 IFj5o ً @#P3(|{rVav1t"X7e6'E:^%L$ ?gǶ"<}yv O| G՚VKjzlyI\"3￞!/+kbjbJT\226n_CY}P?HKJplNndhX;&tCB2Lx50:(+m{VE-8 $5?* qel3F=rBat@m>eAR!G8奂?Dh`m!S?4[Go <5I$Hg;ؾY23*֛cu$E&^oc x"w'Ll&oɪPb2 QdP>v)\) Ey (A_F:RӍ O7UC>t8_K9rR@ɹo!'XFYsfO1\X2x'n8%TrFP=G,zrޗ#̏ihRZ8Y!"3tDEHR[C1M 7ܼs+qU\OHD &-kDڤyFDXfa#֝2.ԗa;u8mo!KBF@LH1@%#!^I%g Ώ=OV4~Mؑ@֞>ȈFT(䳦! Zv*WD(U#EtW/dŶ&9z#ѹZ7a2@kqŘ6Bdr.]1KyՔ4X""uIKv+8M$,g V/ԋ! ̍pls#Ynt!K"I/ہgO,6rx5>%!)tN;~FŸ`6VDfG<] BN$]xg$M2Hе$$&-=4TOP7R{B<2GI"SdxEzYWndg7TIUp{cڗ?z|ByXQՊշFKۇ˒jQ7hKm ȤH<9ЀiBV>+]: Is$G=|QG#as0ZQmx2L$aepL8􉝭OO4=cihP̏ˮR+Q(D ӈI z@wpM6 K&n7Qw%CWpRI+$XCO(4b`G3LOgwomÓAf!?af(&%o~e8Q3E9њI Fjo٥+qs.sRܣ]O.!xsp-Űhk^sBK jx! #@Xk1ak{n:%W/-]2h@ޱ]Q ۀIQz着1Gdjo2;}gK2ahF4U?Dpq.80~ek&Lc16d}m87j >&"dXVĒI$I(QA GVxsd=p"`'qR;&^S5#Z~4AOX{Q(ȐF\Kdf524+/~SdT߿b[9fVqMK 5|%\\IT- WOFfu;xu|շt`陀pG7IId9;#U Ir(q Ւ۳f6U=Bg▒ LbD!h2Oq2EBubM,͚$NB4 |&fY}j֖?Ȥ6hV!RϻS*"| 7SANLLff$*̳Ml]xf(};b,IkkHYU;r4@{nFj"S-fM_AV;bX Yʘ|MM,m]Ӳp<;"h\C3fuޙPG}*1/pk`}NOjUfΛlQʴVF>EsWUuBL]𕸟 CDO ưa!m;p5 thkC+ܩ5ԉ?K&ȃa?@R2`Bm6ڕwBx:o`Q"E4Es4}bMw~'Ty( !c3Sv95\#V.>Y}*?~ݥd)ʶ4XtCsW":hT9#s~q_ Nߌsߏ|IԌ}Ȃo2>bvv;+#h"݋;yWtLpfy{u^>75%|uA2D=4CJ/~?2 $<"Mlָ,9~fT 3.,2%B)H=5^gc?R 3XOlXBo_Kgzզ])bbFV띤ۻU8Ώ%\;;7K*U/vSN-am5Xw11Pε41*y-ʄ AZe-jk#ɥٱE'dhg#&*tp?BVůVlM]..^+[|qr7ê5qخɏlncg;uŻP@~(#\Hu9h#\]pz~g5 B A"r$?S,?'9bqqA)BƱNF0]Z7Ko\(z"3ab.5xwtdqML'v[_W,\Kd0ftv 2鎼AoQfEkhZ{b*8Gep.9~͌i̵|YGFy-㲾Tr,т]r<QO^f&.~񅘓K:k3ID3 ҙnXpoXz]MjW owA2(ATo 绛Y{YVNG{Ռ~nXCƣo6 :Rx0;'z"xBUzas;y 4lӘvjէ5\urX(U͚ƭC4.Ψ<ᴥ eC;8éov(_'GTz  X'f*\#v-܊ce9(Z thFpxF4 M>BE9TLؘ3ї1 Vif1hC2C|,!,_YeO儨}뱆=64WyT eӧ2DsՕɺ^*q2^E7_UXUBǔRn,hz!G|F3xk㘚ut<͂ݏAo"O|˯1 WtjeZgMqO ㌚<@W h5)f ^'7#sbvSvBBVկ땶srTa]j͑z5w_ժ˰9HD*"ZiޠN{u]= YɄݤV @n4~yO@678: Z<|y݆wض,XQUVVS&|Ԙ ;_bq|B@jtq\ ;fi8ߥ띓^XGtU+_btordakXXܥƲ8 [#9TqDmg|ck f;s{sjo330 4v)Zy^QVhkPVy4+Ѡfv0D۴jk6*Uc `F/X53W4Su2+Taw1N 4XxCIW#Tȅ14y?Q&31}><8h}əHV03 pWܰc(t`>tI$" ,G]=Fg^GubGar>VE.rƚپAfSj_T ܱLԲLDUuUFZ14N~+#&UZ:hiV ̡BՐ*?I4û׼ʘpz_3uUU;iZbD [뒹|9Z#g˃='paZr1 j޽=|k߾  4 &&ff yQEED21L`!x!ՑAӨU4 ԣOoRUf"!^o,%YF>pj8ݽɓ\g1tyμpykYK~ͺM@ƫ4j|}2`L3slEe[Y\ia-0^|8 l{w3f}|8<1 "fFcDS^=a sdF bvXՉ C{@VTcixh ʈRI k+o)tsVi;ߖph)ۄ2&a:oZL+1xjӪQ9|v@=޼; Ε!dkZFIt]~5ͦႋ]$)}y<@2f(eT)u)Y笐gturo7|佐7rIlߜ`1DS=! 
iȼzת2nJ_&mމ2Tc ~3XwԄq[d_=fo3>{\#܃КYؕs1i|0 5MV=n4hsQaR;a}&"+/+?9m%M'UO(~X?X;tZȖUBbK3|6z==}77"UNA\b5PU]GWvDX優}e/:vr4_6CtF(ۺN!LvJ}%#R$z(xDKຮɎzwj'$ zjüvx""K(+ 9%"HѨHDGv`m53t^D*4A& oL&rQmuZ)v&Cx} =vFX8=9.LRTX7+=fHd]mc.xG.U_BQMங)_9g#ɴc3ܭM?A21VAh}*9qQ^۟ͬv3@CeN~ZB̸ $i3g $dL|3WElك2ys #c٨gQP+㾲79 ؜n̫lC Gy sVf*h>7.{Zjj'ծ5g'fv <בTawvy0py0A*:s Qy%YmhXs]]|Khz+Ƽmfkn5]|uS}Ð\>~J]ݼ|:| Tw|EfU*B( CN;+ K<R^Mgy-5kKL!G eD)\k}-DL/dkD0!(Bto]ybFwƇgngH }_z>4\nǧ_<mj#cKD݆voTao/&/ G*,0gĀtGG6I19*R;05nc㣖o)WIČoo=Xx}#Q (ൃ!cv->o31bnQޓѮk~&q_/فo"vw{+e~VFz ROqu8x&j;"&ls/&]>'VZ6ZJq"PNo7q>MZ_bSqA*Ԣ5u5m &3?Ee"qG˜W#aB2phVP-xg ;5;фUli PVhS[hݶC^6cTk{d:ə2d%և#;htXsϞA˄#:F/)L'ċ97&k![h:>YĶ%eg*kYu>G̸(r|WWh`5C3N.頗(]+uUn椑~c*av,Ih߶vQ&3ڊ6C8ɡx'DF|#ٲe1~j@PåvT*@WhIdt/b,T0x&hh IZr B$j K3\bWȵ8j\I/M柄[;$x'W/@EnT8ړT [( 4tT <ءpv z_;ueHݐ'|zžވh BԻ Sq/?zOI<ۭi$p$co$$9,!#i&s!mIpH۾Waϴsv>+ S='RN)*!E-2\vrff& N'D;w8C{Kt:ף+psՑfEBi45'f "jHw.^+6UJmM& +u:7:ĿN\wu\o3T_wA3v}7G,g6Wk%zתXbe{IvQ ] ݪRt#IOQ$idb ׎ ȹy_?^O|CΦ˾w+ӷT<̲IJhfNF :oRg&^˿B3Mr686{2r1w4DЖw@a&p)CrSAy骿:o>tZ^ͳЋc;"1QҞ5 ܑBV;zwMo~ {RBI,8}HYLq;W6HނE$sҹu.`Q⵨c7gK;Aя9zk߉Ưv^0g,^!##4,38c|oHJ]_MS<@Sx|J|EhĆ*:8k*(o}'Tf)/(ΐ^A]15t1d'L~뎫L#[Ģ$@ߜi-wHsbl 0⛼J2My~^]vk? ZT* ti6E&lɷW:\Hz+vT;GFE6&߷oԌk1QEd?YZ[m,ґ16BB"Sv   ⺃ctai)v (L<%r?Br@d$CXN,TH >U9Ψ%…_mhPSTs*8?xDx AD* t(wf" ADVS-Eon拑 X(6dn 仲mK&qdЋrD(S"\pyfı8,yY JsM(!,8,H+\ X氛Ӓ0!̎DC )R P'Nf)Ld@6al*P:IeHQ r]"5PAȝHT(@,aDgX)!R +%"s+=HMQ k68G%&Bs 0';y֕Ixg([,S-i5E*9ӐќuKZ9/y\D-?}\1QÈf߄P)}zm!2$b4Ym)d,RӍ4 9~*p,:DSLeUgWG?7Q[QI}G32Hi"(ŏp: 0|gȟ+g7ǃdE$[2Y0!|OyL9/*GFE"#"' ̋;?L9F#T`[(JHDD@)ˌP@̸ECFV"AF"I˴ v%įr%cfVi3̨Ij/ HjHaq<,9ё Ol6E&G.b\)9ݎD+^6Se9+ttbx(>0t#!09 G"9q.3/+ 8L np0)8L3!d^R[< q%ed+e\1~5H;I4d"H5@Ğ晠LE "_di6-dh D9!!=v9ہR d H eu%I 9ȑ0Ȇ$vHFHh@b""A BjFaVTJӝӧ"KҠ0wDAsUOE3ԁI2HBDH@ s8) _ `Ϡokfal{/1}_$swĜv7F~woI2V$ F"H RU9!:E3H9Yy=A_q$QD|rF\? A:kHS+*GG?dONL(0yC+|ߌ 6Lc1<2:<;7|фNϕPn6d a ZȕxɍXPD* x?.8cŌsKd뫮ulk]La0lXŕfA<,*4s$YI2I9|1MW4iCJJLK!J3%U_d?s5IU--l0'Utjz5lacli][;I='J4b,fղgENBшEڑFH؍<͹=Z}n#ڽջZ{<lSvYLu pdU##(H,CTbL!ê ħ G ncW97C;9O)fppFlޮHpU^!ޞr)A拂{ {\DPDyolD(`FAh5,a ΧxxiMĕ2MHpzb.#IڽI) $I;n܇GΝ#X:$ZL 8ɦl%7o&L6懖kc!D@?R[tgk9=9&~:rEǘQAdž;4av"E @Ï#!ႊ݌?dn6i*NJ):2JyeAAPCN ῏:19XFkB (`s وأ*49&9DZ,p0'CC+ `OM+@ْ%4jTҕ]ca.Hljb2F%+ f햀>M xt2. vTh0,X!1ĀHIqIJ \dJ):OBϴpa>SLH{X}$e6f4V}CyߤY*0DK)$8%oә1$Ц%^ A2OE-F6IѓL;8b}g'o'6gG{x2; UHMI6؃ $l<]Td嗳ųɶ6Kfp1;U&8&Xih6upَjE6nnltd NR=r98qי4;o*hn>,I8< 4K:hI.wy9F#ՊR O'2Ĭ<]NvI'E7]=®Gi%ޜ<,j0c6L,@[ͭ "I,HlfybKfF`GW?ĴIF=29 H {n#6HY:Hp:^^DB.E ir/G Dw=$Մo[ٴlýC$)%JTYD :I50=g8dJӂ* nr7lrxE!±cY ~\Li4D ?`iT#"b[ 4isŤw'HDm7ᎭFKN--*, um:;'J" m Xt# D#ښTU5dLbq"hUUU U%,ëb>**Q^aIOӃ;`%'ƚl*qSf͉{ʔ~LO'njL ߲m4<|Gm 3e%@vt>q9 OʹɀLXL$(S_a}-3Oo }H\GyxɠقЦ.7-pq!"J(s'æa llб(rIA huw!ff lg*dck 6AM2rs_G4,\T&:jn~a<}ab'$NS5*oeissMuD,sT0b4z+u1lpф4b͢+BUEBp99C0(&͕ҥ<{2m,*̎i*0cٺ8A8Ab &GS1(䲉BF0$DY4ёLF&CEb< |ca։$`մp)[M$uթ&mbJ\کKTlO!Lvngv>N/%R>Jy.fHTJIZYSMUc *KMJI)6$; T*CPDmJWԶjKyd܊IlYF1T1BSDM (QL" ^FbHbj)HU2aL$%7"l2 DpcHӹB9Y UmUXf0bHҤ*EQUU!*lK%V),PiD$1JaAЁ3![)JTJ8 *("C-)˜eӝKFK%YG]6Xq H{٤n&҆#Jxyli}νv)30G % |78? )(B)1 PQ6D[im-t.{Z>:gQiJR-- Rڊ]wkc: Μ}b>}A,OE1sk'vNrVBG+:Nr= n4 NjGscOu }mYm.гeb#M3*ϒnwe~fs̛!<#a,m{SR,1u$fHrTc.= @q sXF/n{rnns[hN댑&BȘ F$ t)W>9&E:8+gF$<ܳë<Z8f~ni9sA#_B#?~>}]اN  qbjEo442O$M,=C@BM96=m鸐ˆ & `fB?_=JS! J1zُu(s~cgl[uT޲+Rs%xw+y\I`QA =Ża7KJ,HY[_{"gSsF-?^o#SeZO@`(a$:ATYCpGI g29hѡ̔!bY"NY vbJQԋ9YRw7sIѪlwƔM9'+:X#I$TJjB1?|JU_U[ Oi܈*ΝMB=dt;DXZ-q&S0R(8P/x7RtX3вؓw+UxnӹeOoC#_crUu{}%J%NѱR`l'E@~_F9W"a}C; ?ߡaY'}P><5? 
M)>g-HJ*7Ď{T}hl^N} ,586q#ѮK !آKcɻQ31ji |8Q۸{߲#56]JJcI#EJѬJc;*i$MFa^sg8-JF(p舫| O)(T89ob6 k prN;d%drI,$!"/~N9$޴1҉|;dUMXjSs^v={Ξ+>l"THLm dp&7CIZdO+alO}K7Z\uLVy1[G=f`(,?dtoh03HrKNM 0dBrl_*iȒ INOAf9g4t/s(c"9˘nm ^lT=#YCtYK!3a7v1Kq}UG]oSp7Pb*Ȋah@$XN+H/(;;RTV[j5#c (d 2"0KUtrw4wDn:*G0,kWaŒdzgͲh)ΚrsO's|^-y94QW{T# ׳*sx9mڽZdD<'!2py4[)KeJT)LK'Ni7oOVR^dr=t`L)  $ޢfA0pDO1Ӊtt^ (Vl~3NLa9I=_&8s'VôL N8ЇDݴ%6%1_š0{!yHVH:2Ja&]ٳ'tO6}qCE(w=$?M7Z1|Gǡ'Gt][lWH;ܝ:Ύb}0\;3Ǵ‚He!|}1qQy`@r$ ɲ^eRǧ=N8lƳ mk|)XLzp0:} zAłclӢO!b@)Hb\h!06LA&H3fOCnO& nF 7P8%d9hyl'&o+d{$gk o:Hy3$d%z1Lf$IM9S[kj6)cǑ7Tu,bkNgl95#$=ytkCu5ؘ3I0ФTX"DyB# `~| dALF<>"'4dȳȝ, 㚜6gL6nO;st'?iI5ޭOgVOlYp#,dK6'CB DīБ|Dh; #@ TBpp$Ƀ{Hq^nV1[>f$T$pmX̝lَMpӋ$_6͓8 NM"ʢa2FSyl kK%hr .x|8 ps0`(u ,`!Xlv̼6c:'RE!CRtqQܤ/a!6$#'6ΆodSJIppBGd 4;dФ2Q-b6a "%% M% R4wmRKQP:F@/x ChxP] jڅ.ϻ92.'; zR+}t)8/ Gw.zpFC@LV%vHr+NJ,7Q8Q'd '53 PҤI4<%vY"o-d5$pkflspCIIޏz* P"pzr"M8(<))OdJ6 Bq;WVId.3puC9G?+ w3z}.n^9{*v ׹Oaf$)F! A^JR@1U 혙 *I>ɠ&Ԥ!CsO,+))3DBJ{\``A$M$χ09>7FwWje|b$X}$xԱO^s';ϥ0,L2}z:0 $iLd%31.h~(&DuH{:B7FA9IX (NaF510Lpb@&_@?FyȼH?i<˷?V1G?q0)P/ox}ƹCRp"?Qg,4hB_I 2Hx%~,?i/'4概Rߜ{H7qؖH(aF Q Aa@lɘQi"t%I 'ٟ^mV^km,JƬ/X+;61?KT?%=׹ Fq30f)g,Ltl'^Rcd7iXIwborJ>UU-M1gWRQ_%7N½W݋#"#"OdHYD#~$ $  gIyУFS*(X>;4mP`4P.)J:d~\xD>w0W&~f<3>n>'qwrh4ŘEFY21ZcSuvr?1Z'Jh"!cyAO@aOnȬvB1p`:M0y7,6y+$c&,uzAcΑ{.88+3tt{$x"jNOG#ŴI}S b{I&$iRjȓz2mw'{#mk@` U%j Hsxݣhpp`L@*RM#!7s4DžK"1"1?c_dG5$ş\Qa}ߛ<ۈ07kgd2@d~X411 \P2aF 1(G4Si[S z|y4do~bw)$mVD;G蓺'37*SG#1,}m&:q&,<.D:kK7aNGUjKIeUByFSm)4,YpsSLI*!opRO_q+6av{+vJG55# 9(vֆE!G!غ8;ϘF|=0=OKL]ӰtVdcy AM`c K"uvko>jɻƺmK~XceNuQ,=+nJǂ;>H>e5ՌM" ~[G'WoQ2>z2 apIA/{ @w'⩋(ǂ< <& Bkl,(~YMO>m?ZÇULFy1[X~6aGNӬ([Aj#]٫g5WsԜK'񬆤0Jn!Y;1dK:Jq_*+1BLL>AcX& ыC?*0]':T`"LK{ͦBi\YT{[?S 0 Ld_7bB[x UURyJÍN '7 -zsFpټf'$rs9mH@l?-G-ɍ,%NdL{i>R TݖF '>*w|}fʜfUO6-{2$,uxa*N^K۝4v0U8Y4I.qߚ~+c1ZnruSTrlѩ%F16AZ] c l\mT p* U)Vd9ΤtQ ܢHWSޡlf%!)T!$S bȓgV>&S+zCJM"+GDo&,7lab,M84Cim=l-EU,rrC@--vN6T1gٯ95 di&m#VY'ō1X_U'JJl| rh@z1ZȌ@@NmIB؛)CΘFKP` lpĜt*F >.SڪdaA>vkOcԈ% )CIR\2BxjLNҺIPop9<用iǻLbLMҥXJؑ>s U)LlэC'&I[bIJ:@$;Տb_lLI$E>͛aM2()Zt XBfcl;_]1[S#QѢl&OgکR;%`Xw>mP _&HB'4JF4J_'^}q ~:[lEN i1yJ h__@m'!ヵX{3lnu'u*,nHI9 |3.FXsjb0hpjb{GKFD <=ބ$UvsO5Fx;&,u+lݿm eKf݆%GtdcSIJƉHC jz$3GyđZ<9TEQ48G;,-d1Q"}Dβ$盎#J৴i;&JQ Pbȱ$cwFki2C#rׅ"e[ZdI!2L6AHgG!O%'MUMK#ub$:=Ύ6cG66l#Q%N T7uK5IlHOFzBl Bh;{'zI47G)Gs &DWw"NnCHdgdd&iN0%m%8x56FM[wK@Juc#gĭܒ9=0)=ZXLofOP4+B{wL'wg#0*2oh䱬x+Ieǁr=l*SxX0c9h-5#͌FNtsB컬g18_;=he[.I$+*Y1PTIIIdETUSeL+M71&++[%aTa©QhGɱ8Ta"A%##g0IYtc$bRb=j*a۞4a`xCR\fȔ}D 9qy f ]tZ s՛ H8~~Ѡ(X:aSvd'S*GY_L>u;8{tYD*G9#dwNDD fLJ!n 4Qml!բڮ]JhQ/AM+vgMB|K C*Ď b7MOI8NgWV4}ɺ;;dŏ 5Il7X0ѳg ey'{{M#|'%Ru9VwDsG6{P_>Y:6o,DW|T9RpC#ݯ󖧧|mƥ'1:S0ܡT!ra93&v%OzVyw3q,18S&LcNKgns5Ej'rǸ6%IdN!HBCj D &2# Nб܍iOr6%)_BCv!K)v#o2}w:``1 c&P)Ρdqo/ѐk}2$t]ӆ`4A/~XaLjnI$8MӎŁ}zHa(ց|X+BB,Y 0Iv:5ɧ3+Jf?Man2PÂpLupS [<&N)D<$Q"%< #ֲmoZ,|L&."rXWxL$ )uং#)A&E*'f4B9m2~'( qe/c ٻ V1bcc *M7o!۲wpTqEjwϡ18Ydm~uq'I /I ;uzaK+pFT3!) 
@LlGF`iNm<M7,T|wOFMh<W1䤚66NBt!ªbƝ'{hwՇ9p"4&&|^ʩ#S;fЏbʒdR.+ +*plu ۣq<V$h&N賧n!2l{"ЫZD*afMz+ :S[.U)_w8\,3pF (YWK!^Wh_N5|P/5`=D-RL13b|H1 f}WE8IB d[4ŏZrx1ϝߙė$d +vBb ;$ÑAcLևX~U-;eÐTy1uc(9哝ǩ1ьp{J>rIɍ,6%׾`NzlM!އ̣TA#d3%SbG(` 0{ 6MI'DsFGs$h'cG2#ey9-GE{:(|b dՌ[V(%N^vZU2;xqCs'c$v*''gFͧܒlj6T;IӻdcᲞh/#6U,,(ĕ j3vXcsT#i&xS>Ci<<$Ncdo =%ԑ8z^v$ɸGŶ1ZFNOYBa͝{Om-C=Gby i)>!I%= 1ifبƘaJc4KHPr0A|zzrqa">E;5{vԴ@j?2}!L2,_/b~'?7|Sn I=>xyY]꒼!ǣRIj[rWSgO55*uKS%c0?AvlPh2\*|2e[WOFqGT$Yϴdn+؏~Nt9rsLED &F'c\fiwY5QCu r@y$ 4h8"cpd M8l:,<陛afdn)#+fÛ*nttuh룄*N$m._d~ Q0`<#|7>; 0SDt` nb}GF(QCy3D<&q(i:`A0"!ẽ6\Hb^-+^>=]s q)I8+FkI6mÂN!ĒhCw kI:ɳrT`8m Zv/vccfx+FC%gGF%dȈ"4yϾSgE`AY$^ea&#tgSbtpbtÛnDb=1n__0~3a Lq rZ3lÌYp3e$3t, :{PCrVԧp"lNHD"!0&+#v2M;|<9Z{K lإZVA!fpL> !G(<̖C96"OcEp;&'[3Sǻi_#tc黩6Nm'ДAi } ANN2,A^O'3SadıeJJ6sS{orn7FZt±+[5 ay77/JtpaO$W)2CJHp198ĖĩȹdzBiiaL1JSzw΂ic|ׁ#i=!HȊU#l͜H͍6XǰouN=LತurrdU|3¡\[m^Ќъ"|~D%T|lGQǃk5)%MDQbuy{.:y㔞+ݛ$wG(,ézZ E:SVJ='eUpuuxл1AxB/YA"m"Lٳ#eA1!Rbo9I:ct9zUe*Cf*jw9*-ೣGv ۥIMw9O;oCxȝ;[cKtШl;fpd8g'DNoa6d0@mL a$W8 @`wL8&C4(nMxNikwG}m8$vu+=[a$02ZJ);GiLRE G`hj@я=bzUۻ'|yl=/]''A/bU=zbǣ=UgUTi& O+ ^H:#=UG6ZTС6c\ڪ8c H-D1BT_x-]=A=`)SLV&a<[zcz{%i'Bc?uڀȤ=.NPY"sI{5M3<$˓,a3+DrNДJQ ܊$W#Nl#:cF*!Y TaIy5b‹%lJGsSqZ!d1LlNUH&"wX7dtl}*aR;2NOf's1j৫Irm> #Y=qsQ iܛ 8S)NuU#Ty4lvmsod>/ƧΓKY9b{ \In:mx~*[lE" @:r"FwH,K|yv2y2 : ͘ m#>z'fcp:?IjADGⳆJK% Vf1Z1$dun!ٵEQi\նj-iٷX#QVM$,Z2Uz4FjkN2p L4CѢI2>I4+A$b4@V܇͒]{שmC4ȶ +!jXyxJΞ1Vyw\)/,EJv8f'QCˢ#ո`C"08`Rf97Z6t!9)tDZwN~|gGFҰn$q3nPhLІܹp. 6,jas)Ƨʳ=b.Q$"$DP߭'ka_*#VΏ /j5(@34mSŴ=J>'j8*hJdw,zy˕!W ׯttM=c̨]' ĜSMQ9oϛBPcnJW- r,a^& x8,tTA&4>KN]grcj:.OB 1eϐ F6mӭCB^Dgfv6ԶE6`R~l3 A(y沒3[fcR绾`}YIS!-Z, 颂aL  ]ŘqG3ǐd!$ӪPgDž7x_rUT5Yk9bm~pqX)u9?of&bTaJjm"6q?s /RBIJS{埝N73f, cXtRNIoKd0Ko.I!`1u$38E42)ci ~GaٻiiZOT~Xm>%?&!3F~_BN%Hyn5NVfL#Ha]bF40[b5L9&d&'^O$~gNo}eaC0Y nҌG4AaiZ5Ͷe*jpb"1``S(pk ;Z(ފ)]|G:]Ek[{fB/=諙Z?_c33:sB;!>6ZdV>Smf/ >*i^CSF$;LTw>=`@o=V_'Tt;Ӧ`\RRfRD"_hH AX0H)&=r;W3G rV;,SwNSii]=3l?$sr w; BsU{S}ƕt:|^cfl=<ϝcp(fCj&Ax u&61N3}4pT@#E1+Np($+ٞK'5d;:J-yNXv/rH@ަn}Y*Ӯc6}ٮ] X؜l>Oa 4dq#5AhpHIFWg߫lɌY:,[27MF1#w6&X=$L喲#9EPw={fظd**}O= <w]ӫfӔPz|(= 2'@w21ԨONu>G)ΏNJ`1̼pLtn3~6rVL@I#C6@3@IY5A ֆ L @W20,hIPr/u D`dXXZEy5I?gsdӣ${'N+Ѧ&NSHgR_c|Qn^fnˬtDnzU3 m `hzCs|Ǫ|Lf1LwoYi0Z* dÌ!c\t28`Elp 5 KFfkȎVp0)+&TXܼ7~R0V1pamiꎬd{k**mҕ<4{^s;I~Bן> I2)!'@n] ,BᜃK@S&icgOrF:GNRCrn' C֮5-.id|'`E"A2Mp,5c yyyW~翟wfȒdsM.Ef$xq)#; Nb5W#Hws'CXysTss,SݳV^ߴOSi{osi:4wvv6M=CF::&)*aa㻣>-m2kL, aYIX;*7{.c3x]u8۫Tn |nφПQ?{*\q\sXֵ3.332y&7YZ<.n?-AnxcqA8Yя=@i7θ=}/Y;,H|aĬ:(B9 ͖q5b@DP31-ͤ?| -m1 qY6kBiYcAκ~5rqX64'3AVw6WHB^ b&h~:x<# 9>ʮӍ>΋L rkٮًL)$if |΢`m:m@M͸{6>6[v۩TQ"R 5 ]0Hy?léIvͨGVcm[QyI5ݣȶUG9`~u}Niwli}HtOΘnJWdp*{L}/G7G0GNn,RһԏsޅY_GבސtPfNuxNn3$nm-oO;Z;3 EV6CQw**.=H5bI$؁I\~yo'/5Ƿ!` Ѩ1 D56քR^9xXQ}eDV\a/b\{h @ hڊ#xbVp-$S`jk'a&[bD jL JȰj9}ByǢ׿1ѹd6{q0‚ȳx z7j u1# ً [2wqfnCah9Bn4wpG`ݍ/'jm8pӬWM#w{hM,f|ɜD ~c8~c,6f}Pn}$(L1$4. 
jA2@HbdʋJd9a@fVߠљDl-1 D@_]9/Rk|jm#:=aawtw=<5QLr9iO&B%C^0BKIiss@HVhĥfd^1kҌGQԤ՟S.&'ܟhn~E=yD"I`9CZU1VW:F0~,ȧgA1QI2DCAKV4-4 $ݴ"ɥiln}T1l j.v` t PZV7#83-mdQK4GNц&sN-A $l3qLi"*75LNbbC(B)~$wxz 2n& f%=bZ8>GHZ|x1eQ"!`ԈZbbh*"BmNEG#t|4@>[j[>39O1DzFߓ˃c{棕>3O HݎñF68]e'dncޭINOryC=|JW;jR0t[mkZM:=#SG| tz}+lٹs36d=Lvr˳ nݓ$`F5A"u*r#@ڐcݼ|nTٳv{H͘9ͅ%"mfPjO5J7v%t^w Sɳd60Wx{яgXd ܵ&wgWkUkfCG2!INa[n'ɺ}Z$r0y9^mc;ǟ4(t KHPi/4ϥ0L(h"Rk(4W:j"X-qD\ L*fd^WBcw{ Ia\CX6tI@Agh:88GCA { 5Lwlty| m%cq|ߛg0z oF @W\EweEB_nZeV*2F̄0uτ>oH9mL>HÅ|IʛD΢\iEx)Xb&R/ܒt6IZaQJ}ދ_Wf/L04h&!vww1=QI#گt{c6?/SSc=F'|\H&Xq>/Sc(yp zc7СD>rf"0**N" lKI)$I/RIIII%*5^hI氿$8Ƀ#33?8OerHa/iPU'V W'T9yÅNQԤ(RyԇPVs$I5t]hUf1CS2?ISkD:()#ԕW٫"[*b[VSn*m*<"O *N6X,9N"delCU@oneާ5 O"wd=~cn*CQ!ᇼneW7qZO=ԡ gç_bdt_шeIP,N= ږhtAvm>wXd3 \IHDW|{V_Qk|ЍN>AcQF}Sv@!ul]a;NVTe>}mK1aDd)ĜHbB5+5 Ox eOJ]1x8Ֆ̺]BXd,C3VsSH%⪍B”:k3IƱjN9D$IW}ۍxpQI$$}Zk@o3V( D{O{.f31x 2n8E6ih  P|@>f>{fq0# 6ZUCf=dlA"{||'3j_zMGtMsN8]w>0fOrCR:'9X CS:LHr{B)Lq*z7`Y#ÔpD3 6U EHL]ߦ\ҔS&&fɶX bgѦcE(jXT ׆ ,֬yI! ^dOqn[Bzԇb΀=sT3ׅI2 ݟOb'IJtr7c6'^어k:Wz,5kFiڒcȐM/A|+D#,@2Mwg9|3vo.[~ 㼑rcm1H਋g;=OE:ÿr>&O8$Є3`On"Ǵdc_JN:eDU{OqL\pؼ[eYܝCCrxiĎ]<< :=Э= 2'#WXZe,NP욑f_o)Z'F9KTynpk0U~=S\wd [H M^Bcc4;"r&L+6<*BI6BʼnB?M"]߽@)d`l 4s\>E4)l2 E/0nY*; 1Ʉ4ZhL@LAMc Kcz{`睊O0*EUӴLFfk]XVEYů>(bA C/]yf͉g|0I$ Ol4~7˫%v2喻>^$S|ec/I;0˻2}Y9YEQ}<|_:>gnj|;]UUln{c 7r7W5E(ѣc(lJYx,&h,\% /|Zsl hz\t]s s'#{aU REHX9wL'\9u*$Z# Zonk3hG:D()0$ J^XsxИDE{\vЁ0(<ǕQ]qe'>.\n$č;{݇OZ-`js6 D7͉VLV7"Da!ЖJ1OxRxDE\HrLduvqt-sUTDGGJuve|W{.DU<%%s-w#|8֮CyDLGܱ2[+M\lHCflh(ݯ8X! KɫxbƏ<-j1UsYԽ˔W8\}xr>qN\YƵ2rLN1=2A WMӡMw1$?&a @ы/Gb'|=Mv'|?K%Q"C3D dGpJfD$/hW~vD=<ƽYBcq7 y@5 6]ljq2a%X-636-Hԑ.iw=fzNA=,{vn^[26uM/"42)r:nm8$F'~UiyUWNzx?QѲ)j/029j>y\{fUkbkl(h:ȶg5,1glI=L:C ? ;~uu}p%{*XW*ŋN)|aUR'H^cAb;a/vUΐ:faT,;F!Yr#9K͕C fI\:58UO x _EDׯof?eE /vt8W#q25ZNrvGg>OUNhU5u9_"iڒx {*?5O u eKO3 |#^!I>]GM`G4gpUOFWr/ٍuHjC!v.&٫Y;aG5Cx;97߼zEavvkH(/cOs+hu>66T{NӃ+uCQ>9 =}Tdx|M,4m~)8w-ο78B%RztU;^EH'3=ws*#S7Oh{zzF潾} :7/Naӧ8f IJJ'vW /CiwO6Ӈu9XpKb#m͢jY(pdگ)KeG/lZC2ضj3wA34E@q꼘H_;c||(ryqD5N}+#fBʢLctvC[ÿ. `<}ݡ"h1 jø34N}H!2*Q~;a2d 0aDn9 PM,zN6j!Ppx( $](x [ف <3[?1_A<϶b$``$}m+T}䔯xr;Dӧ8:DNN8r9hDH#ϡi2H. XsJVS6I0gXFԗr $ L_|6Jl&1Ϭmnn6*vL;7 s"PCDvsuiI='ԝ<=??o[$făο-Om =Zy[5f PGs͇#MyN}6N[d-DT1EPY%2ǖRpdNP.ߺ$K!ZY#zo_W)Btov}Ѯ^:eCHSA!vy/_+=D䮐Tu;yxtkdN* _廼; Egp`_@Zdt|'V/ل2`)&46!;[_k|Oj6GDl)>r4,[]ucs5":MNziAtPj4Jvi6itc͹kUc(MK[6AESфF*Čb# JUVʍhT2 $ RFRr ʈfͮ]ST%e $I)eQ3I)1 ʿ_s^U^]$4 a@H #6љbbXǏH1fnpЇ>D|)rOkN*" yޚdHɎ?gGb"D(L,@`]0kRd7i;:4:!^f7$?;  JဋZӈSpb/0srp '٩hv0LQE?y &.ޖtTZX'ɡv⛭Ԯhc?'a%-\yRgD2TuiaJXJ6p:!Ҕ]&,ā L4$^8Bi4M!*vlufISM5F9c H!)xgw.dw(NR0 ( aa>~S^zV?&wi+T'溰R8M(fMXx[[OC A#bİw+=V)X4qZ: 1 =|kQ9'Sb/TC .k3f.Xe$dМ4mi7\5Q`n^-L5L,~&,^Zo-*ȎU?t5! 28Jb{F;'<_K%- NZ#Ýb͑o3ΈuOvFFiK_|/k71woy]zAu"E"S7pmMC:!8  R)ևRFO_M#ӾFCeٞcX&6>27Ĝ!j1ѾnW%L0 ( bC'rCjPL?΅1& "(P +h Ms hsQHH>fE'uKnb]~~PHMe8IvǦ34 UAc*=p|9S޺sk3[M5mQS2p?N֒I;ɿV ]a˶m lMDXďs߾)1jOb@  Ivfвl*K͑NJKbE]-sZӪm;~S_8!ȮKJY&gMrBB̈́HDBDḴMIe4Ij̪R|ח~bSݛ{YOyFΚ{&%:3'|&H:HB8l-Mi! 
ܲ>ŋs&Iۜ9K)aa-UHMrt}Ge?̻ͨd;ꍚU%(|;HZIa-b$Jh}:S@ j?%1D]F<~?s)(U*R VL~oM4)&\t~ TekжUE6߲ɘ0$$a;œ8yOx'zɁ}$4tFN110Lg>61[c$@$MsɬIIF%0(ލ9牂sq ]h =8i`>2)8r4ty,vhA-"T ?V(Kk(b%)㤁}1ɳkl=/|z"3/#7 gqw}' 9MRR9I0pa[U,5[n,bzro_lP- 'qwE6s"!rSM[c4FzaDk͡g;@`ZSAUK;䍍"86 ȰbyG{jCipT9`?#@NdJ$PbW$UUURI$I$ծߟL%ә1CRA8Rq MHa!RuѪi SꙕWԤo Y(A3h0Lqg3v6шf/Ik9YXӊaLK2.q/%,izBݳ9(VoZd("+N@L)An]AX ;wZŠraJ#sۦЉOɯaZ$m H""_ ad`$)!iƇD$P_U8'RԢ y#ImCܮ@*`?@ C,)O['鸓fS?6OEVSmF><~`ÆR&ls~O@n0`+*٤>O.I+-Og=l߇'rDdJ ;P;Ztr_-Y*ܵz^!ln=}ؽ13Tbe_LfjTTŒ~A189X55M `<22`A 1LdD.2uRP͡654Ft즨~ΘE8rn_ Bؘ*Ҁ- 1:4fmT:޺$$*31Rʍk5-vki(ImJ,TTRңj[(FLLlcc6S)JT)e51Jl2JJf M ba3TPw>%ifBMMvm+LtHtX?F>vij]uݵ7ed:a0z!saĦߞN䝐Г.>GHk8&  A X!)22YVRdM9,zMF?O%(R3 $(arQ*ĴZ `V|צ;חI+Gޙ`-3?1Xv&̶~ShxX80^SqŶ6<8ӛg&)Nzl ŇM xnrbdSd `@"$$$á]9c$Nq=O iY_]$^dZ?; LYV"iY,GM8Fh<S@ LO_fQu1<$7:/+j$=?/_ A:7uG?C²'=K9}h4@;˷7WGo(fMAC m%JUo m8ߏ1eKpf /[औvL#ޟX^Mg(C^5?F`:Xn(ae ;]A71=P毙z5 d !U?_s+ ЀEieF^`m>W3I1ڧ) s%"K>6{3Z:m]2~7+Y)_nd%S*ס #M1{rp-&Lv=ԓO1O_h]kQ1 hL^^>F_v6vK~D R!B4$B'|9=ăaA(,#RdXD1{#$ԆIA,5I+t9a&~Gn$(pUq6 *z£+G(e6d!9H~`LH~f9?^܎t#w1#fX=^&D}I4LIOW^#~ȍ!Nz? V}P}H)1F)ts~Y^E10ĀaH0wW4\q^A;YSP S%0ZĪF(%$vF} ۣqCIN}S<*) &:X}D1,U}' i`X{YCϲ:$J'NfU_>MWF_U.!S ~JɌT< ~::Wg([|fb[Ita+qJ$٣/_#;l&S ?I}ʐ>p_C)zoJ}eތsP( /Cdl}2ə.؎/1O.c8-[b3NwlV4V)yt^[Y"V?\VLF?aF)o24 GrZR?C\۞j[Ľ$:q̫徬̩IҋKNMu|Q6ZduOqtrsg'ShͶMεIMeRJRTٶMȲU,Q^py=&%{kЦ:C'#pJO~§-#K+b:8|*L0ē:: 4Giɚ{=]f~^KħBԋ!%pq(iBňpIp0FhF06@E'd;d%!BdA1- lą` hb˚m֢:R uʯ;qjbɓ;$bRjSNßHKCEvM48 bR˿.M\Ҏ-f3)7OB,%<{SyN lf@0QI6߮\nXK+ښJb0Qv"Ɣ#'v 8l)uSQіhR401R˵‰ m(kg @J*Y 4(AYLJ]une ŹBGk]u5d3B(ŌDcu2!L(L*\K yrSmm%f4M>m+(JlL:z6EkE X9>4Y382ǰ49) UIQ?bG#JE}SA!dJB u:FT5o=']6lwxQ\I`?it]Wbӏٽ+qHi iHuq1_V&AS @Ѡ|l Oq.QQf2!>&4b4U #$P)c)}xP%4;dcŝLT$~"~q66BFh x B[|DD8&ׂUdǽ_~oO Նb1lBZxw~u~3Nj1f d-1=`(eCϫmܲBP ,ϳ ڶ?#Vf|`AiMcZtiR6_\0ȴ2"Բ)[XU4IM "荣 v뎶ETl,%͈dl1S t'ľZTQM?a}$"KoU&Z2FQ%?-֡i=n49m?D5V~$zgAzyF@(SHx R.t4R9(ä٭idS V:`: y)`$㦃sx>q / qnTт7hsNa@G#J٩1u:(C88C=˱iF0`I ht־Otm>e{K$0|}Hnb~oh:t0{cvO|`fbDohgy $ 4JԔmJ/9U @TC S!0G!%rE0ϰeDdv)@"[DO( 1A-1,T`l@Њ+^a(,w 1Đn`Zi9_OiR8T z픴|3KLQDxv]ԣHag +3L}Mͱ;ijL%e tlmGEYzJ;cxHhTz.Smm-cKRiA[m?`WYgcV#,ޏ[**Pz !;Z:&u$3,F", z]v9=FVBII0Ϸ0O{rGޔ+XCt^%;{'U?Sxwҕ|d h&4fPnSdlh+f2tb\[6_sH8FrSM7<&d1H :J:m Ó= pqAQ"!1:Z('U.6b=Z;{p1*:Na[A ;8l39|NO9DhծeX8ERyS~ :GY:/s|_KjHВAݓ 74|n.tN㔮٩݋^3 i \dZLc.Qf}2>.8G o2R$t q$M!&埌Ny{8-kM#jrqvcehdcm?}]p>oMY5$RQjJT (HrC TJJUB,$_\Le|Qy ȤRx')M !iL&S)SeLKPx"F $]ZZ5TVК-5IV[`.M#msTkD+H0@J,%bڶݚjF H?5sUDUXzGC#NeUNΟe_]19y&ʳFK_b57a\c~]O=! 
Hi6-8e2 ahԀ15gC #Vr͙ rRbK(*K ŘN^Kvb?[7٣N&YE,!EB bM0 ݇iW!ŀ 1=O' 72_DwíPamvlUP(~.E>C+ʿfC1, 2אYd&*F4'v+\ "4OsZ2 Ĥh yF㸏 C1m=,0y' q%3؍unjZU48d0{j&o)RmCVZ.wDj~ij%'yؽ늫]ί8o5Dr C2chLkXJ|m ܰZBQggMCkd}+̳ Bf4`ZӬv>2's'4AU Ա0o } t~R쏥mMUG6>}$%{twQZ2MK[ ٵCopyɿR15"CR=cϦj00111)J[m[mB Ҕ1r#$]|aU.mIIhuhف8!A9YIJd0(M;Z?7*vy.$1z9͜ppx)lT5(:̤w~ ^'+=?hD`ekbټq3,4añ# 0+:IγG in* 3xcfILDՋ<{i|>7YÕߵآ,@ؘ 608,v jNkT(*5W}.WXZ'8n>^P`i.OeD0{3Mڃ* Ur!k$ 1GT\1" zcp +l&0r[F;1{s2Ad ˻㱋Ñs}߬fu~:5 ,-E6wki?DzmOW\c B,f`3 U\I=WARl >da{Pio9_ctR=\LY@8Vht#CA$ E+JVd*NƝsD#Ul45!8Ee4QBi;3$0;K 4%- 1PՃ &$;JbJaAȄv'a=%hC$vpr*Z`6_\Mָ-1N,+ p!hifHtnˉu8Nm[syhD7)'B$N  r&^:T( k0O"Ns큊C s2(ffU6٧*簣 yb7O52Tu^m}lhos3fF\ܖ69p}xhU!%1IoLs$ןlj4+[!&cM  a#k&v+ :8|E0vlms { 1љݎ_AѸ.6ucCx?g&B`@a\!KLL,yZ÷}8t.aC3q,p3pƇmD&a: :^Fǯ);apwK}Cdvfa!zmmC ޛܝ(1)laӳJ d n+Mx30m S6~dD59$0T|E 5w%CM.Zf &"v$4X43ikcxxi9&23, 4ӄ<3te`xc,GY+B9!qåݲ/9ɈQfLDjW6+g0Wa#C-ȧsq"K(kЦo9s Lpڣ4atRj6s& iyr᛹F2F <1N-b͘B^ E9-3@ANee`,[wD y$r^lf Q:# 3a `v^-T-L?D4{#=oCA;2$2ȖB)EPhRd4k \Ѹ &H=LO!n= m Q۝ڣ#Fj G Pz lPoM5ѧfay)݃Ha|[4Z'u%rwv-g*lw+S{SQ˖UUl<&5iT!ރijp@L 1ϫV<y`pCiXvw"mZJR׳ϴ$Z5q"w≪psۚ"tZCȸ:F  nc^y87K"tA$ #:dKCpqg HvȈIzddqE9 ^+?6rRn" hFuT:b!6s#J|bq΢X NC5lP4V|n!G؇t[]ZjU@ĜI|8ќ >E=PahYz *c]FiamI ;J`M >vZѬᛱ f n 29ny)]]4 H։z8A`5d*i#mI*;l#8qK9EP6 &`KH?W4n뷻'E=opD铈k>zQ Oje~Ys0$جj㫪.lw6BL>Iı'^O!D;jSEvl΅ra( ofѝ.V6~Y.H:mtd# ThVm2QރͩՒl gGF΢irr 2Ɍ;g1tv*e'čj`ɱ}Aj. wa-n@eiE0$ngL*Mz]>'iA"L[WO&ŠXѶsّl['3pu#G)ڂ%j`4 FPck "s!3~ބE43ե0]`v2Il=T[ٴJ rq Bjkxd:d$(Q}zAvMHP: gBuj6 v(9MҬºIJӟgOtHz}!l" (6#P؇GiO+?5aT?}LR kQ@98/d;y\1`9;XԔv\˳Fbd8/lZ$\WФ` h9`L2GnwCHb>5HY!0O`T d)@?JNޕۅ#bԼi"|niZ>;2m3i0xA &|QCZd3ݜq\2EIڌD;Y).7oFm:ţi䒨 "1xaT~_ X s(wwk=:$Pad8a+=Q/kFK@my]w͆.`t?jK#[km~W-95 ĥkiLzY*ԕR"a2_i ?NwC3ۑ~O*Ihx xgL0 $楨[$yQ(P(@16>^$Rg OS6X|[ݺtc3һKu,͹vň RGzM#a"UMe}XI:(-$N?oGqWzyϲpzQ\YzVk*g.b! (\* ! I[6of!8aZ1)A''uAu!أ,A#8ym(- =1Rx׌=9-Q'uqOؙ 6,QW),` < s _|$(hKIB#$;=8$fD4IaQEQMG'"D}~?~frBԟ&ȥ%6x: $r:q }m6Jjg5?mVռ1VU8qx#V;m$ʅvRe3B 52)! -^bf(bM*'yϗ@Ă)KhFЅt^9u>0;:Osu^Xs)D"u^N 硘ne?1c "({n\',&@B2SrR55Oo{սq2t~#҆UMŊ@Z?gg'c|#K[b(w'c.R4z}~ 0W_kAX02ϣ|GoCD$&/5ϣmcf;Pņ->u cވY:"t1wԾX[Bi_S7yGdJl+UXOtQ.4YKFY3(ùi}[WΙ3$`B90KdA PvI.61QJKnF% giOo%pqI}D% ^l bY,rh!KIdӅ A$f3;F2;(Nd V*L:` BX ^sTbik3l΋ m> tr g786<: KxN5M˸4 C#``YNYĐ=z@^tL,& op?mL)YÅSNn1Ke2,鍣9/IDtEc'90X'')F>Syļ{'(%9 z7:J j|I1hD9ؗJbHl5I_MA]cI>"ۯX÷^/}aʏZȷ!:` ԰(su2AM -]tnF;la٥l1҄(wq(4N+ly6l,rnQ9aKIcFZ.D5cZXT= ²$O!󌘜Aq$@J0Iע]P%ZL!&  O!"{A9< >y~3S1hN'X6ݬ7m,a" | JZAJ(DzR'_}i'52 z3 XC͢SvvIqkjZjoEy$);B/L~ Yz-qSeJb~T87þN;6YH-@yRGVtSӵ BNTjҺTNhԎɋ'40Q ^B!0FeD*YQU3Q8~}PXiu |5 $>+3fO ~ ?7E{Oc,`M oa6_9|ϋn3YܒL~.|F8-ih6Y97> nnl׊1Gea)q2o[g4r`!L9ueyvqu!>v[7q%D6 }8,+gJEdzy&*hgnҊ{Uz{eeEђvY &bq[Ә̾g-%3ۿGz΅?%>Ꟃ(S,M9)|y{BI)zr_.S{Hn;C66O<iJkN¢Q1';@W$0s!{/.e{;F:+4PLʂOl)-S0 0 SF,"2m oĤR,#pXUTT }=7zs=y8&2G4 *%Y=h+Øk[sݥWD;%M,(!7&&""czm%B0$0 M 6  M#c$ |ƒIA?YdL5Q[oû QOD㍫30"X"iLSXLMV*.0f%P$#!%$C.Cf KH^a> oġ+%tM;6$u|h?wǜI ,c UU|q&L^C "DUFaI*9xPTPr➃IbE RV[zjEdyąJi!B(W PәM. 1#FFiA]l>4wD͉ޒ*U-ɽ%_Ljꦞ6l}j}9@ +@_HO҄H=X**E R&c'`?ˀp3Ky\T ? RɤILfuLҤ--{%EZf뮺auȤssv#`-,JRRW[^Dm TJU[m)hZ"r";DTViDhL]%pu9TE0l"`c݈y>[Hdչ֒JG% di+.@8ã"Mk=OUБE:* sw98(F_c@SKƔ f 0D0O>熇լ zztLN 4/nZĨ^00DøXW!b$xd[䣤mr3ewm%+٩\9v)0ъ M.cEA*( dN HBH`L3qg>쌰'Sf s6LV\MV`RI&gݳVi d,tBm#5 +Tq E)\ۭz' %VƗOQ\Ta~:]O$%=(cݽ35a1%*h]>1,MMp̔ї<ș/Sl $Dǵ}g}?J<8ĺs/}rԶ4˼9yFOFoJ11omEs~CNz,,K?Y:s7uD6SY[OvłΗ`v]:M âD+!$a}q"7zKn"GODBC͆TC9&$Ai,9+"o Yߙ0;Îͣ+BJ 7v!ldbA jh ?LR>ShtnM""$֟GPEO;V}TEQ$cC{{{ƺVk?z`)1dY^7y wJ$${}kߓ=[%OmYq׮. 
(CP$iU) UpB9:ejWNЩ'58R9BCG,/{2#zs6tx/N=TTOqa.Y5S)u?ʱ/a $* ƨ^uJz׏|d /?TB./qavwUU{Ϟ"$?yT#/,1xGDH\< B`{}'2M[$B8I&[=}?.jOtkn<0 >?4:?cf`aPP$ iۚܫvh'w[%)@0^ꇥ47s3Y7fEɏN*DSY PѧDЀS맛n!B: o hI*˜y'@uucַ$ H[NTP qt{=MI{)D~zDƫSK'̳&wpwI}&4ޑJO2tZpȌI6W3SZG+0VTi lPe˅" O8Mf1ӭfv++!hDAU^ P9i*5?bQTQE|i*Jz!Rl*_RU},GD$<(C Y[6d*BOO\'s,o;efs+>WeoUv1Z~=9ح @?OPd}z/USx$uXH5zxS0ure[}+'jO2n2!KaDhݱqPx}^NcsVDRto8~)+G"9HOe{SB7aeTtCά%l_՘S*^:~6_g=d0f T _F 0EȀ>Y5!~o]~#G#("885<8U/uxh e@v%))ZaGHR/JfACPÏ@P6-(=HkTCA(/z ]~{=-WC:9pY`[s3\UBf8 _YtR}E?ݩuҷ 1PhdR^K<xhAӤ @՟5fs٤ZESxRgG̖e'#p0!t ")gthRIFIȅ(T4 EEP+@) MP i UZ4JTPBi^l4%T@#M(PSH9 02J0d%DU CBF$p>奔;̵t/nah&9d{@}Ǽ=QceRTF}?ZukNg;o;W!PX[+N!7)'jÒ>$&sɖ@}c ʬ!lH$}wGN|w}f "Ie$% $"aA灤@>yS" ZV bWFhRED[Oٵ(B5#`ӝ N-6*] Id fhcTͩyKxSqEr!{H[|3uCO<:WërXRifT< )B}{tĦo:A*3ZGZd1wӺ4K"\ʹ:Xvp_P[y1ߐ=:C,f'CnBm ɂ& (b I2r ;o a*-㛱ZwFR#hqJq^P*fj V(*!ZjFU(k)Ԙ3ꔬ&%^h)/%9}zITh- cBtF`᪌!R[35l. &a#ZL8Љ5K(;q jFwwAF6RԏЉl LLQDEPJSA5CCIA DR1syitO$pX$=sm̦f f1 w!%%*CSH0=]*)'2'Z~{(4u"RܮZCiާ83Ӑ6W)( IJ  W3*&XZNUJV3@Rf31|%Lu9cE$\'<8R70fgLkXfhDMSL&JdFIU9y)b Le*e hMPos#m7q'S7u]X1*``BH&ɢ{})ˣvo)@ fd"]k.niq'6Α;f9Ik4 &aZ%)d`ѐUW`:CDsУd:Q|>1>LN`ݙ\3üKjH_/N >ݥ=X 9!FvNPt>)N[2ё6l{F{GCi8 mg|I&(w!!oeKh^"n8m!OB >:jc)!Un8NYK F#MTJ&,NG/ Kr :tM|e<NDH;XN"/]Z&?`d*Qj)_ꄑ1t9W=PdM/)[Z>ѱN[< )z<]?TsYbnﵻx{ЍcF0WL-Svw\w*њ;{qf->^EHYAܤ -.ZT@9>jnMc1h!&L&Rm9? -<+OO 8sc@#էI?pOC@o^xni1?3O!D=xe\ږ# d:2_wKԔ$s2G+&]*,k[Y칅eǞj,Dg<|W_~֪jѫ*( 7GuaK20| e\g_J"Ug=+2}asP ;SC g(Y\QKƅ)%`RAJWyh!q[ 5h߿T@} IkWXI*BBAiEwQog~iV;k1?9@IDutЦ(x]ҳU gs':  %%8P>$H2"ZwX1+! zc/`<TvI7IFGd)xr=:Hj~W`yD^i'ͣ~5 y&lE4 w^D2 Sli"P=m>:YR8m'$6?Ta]榔tϾSV:3'uD9OR.2(PYSoJ!=~X :]~!8Lr,Ę3R,N3fɂ*Xn6D&49#Ԉ`8b E__Fծz%SM% 1ؿQ,ݣQ\we ~="ɡB՗‘,}H]OeU ;*M=Zޭuެ1R6{kdǷB=#7|/Зo.>cx#0&By2-l}8b@&ÆF@U,~.-<'\~S'U(O T|L_̃4m_(PݖKz'5uzoUgD[4y*R7G?Zaqo!"Gn/a)b)N M&?WwLU6) iqN4ɷwv (aS u*#+. U]|j3%Z~UVF@^1$&ŚaxZ+}ykˁ" H)@vPzdc@z>(ay*b]r_Bf Yk|wWE$aPHuYNZ$oL< |>dzД䓅<3I608scFf)g=N$$V(qG}&s)r+.dƋVM"ok6&(y%Œ:{]/+! ׊&2J݃eмDl7[}\y`i;7 W|0#pũ@:bÃbCۉL|~;ďTn0{1TLT;)2 mW Uf㯇0R9 ϶Іn$X*ZB1ECh;5/+,I@|iGdrM̷N)d gtIlrWIc!$A"E5f3y~|{矍l9ݜjvYVwpCn&WOv#+JN [VHr3\'t䯕Nӝӷ/eb9s=)}/6Wmk{z\uiztZ[S3USxz$)=Jr"q VOζN_Fz*5UUʪQGRb'`PJԗZHh+RBDIX )IRdFza}B̨/b3OcZxʡ\Ie{Rכv!gR֔=M)/"8Pa5'QucWY)KQ[I)/dL"lRw5m;n3S/{w@%q$$`_;[(hT|LF> {87;f@$"Zf"bJ_?'˓y=C B_g8)cќߣk8I9?Ip]^)3pO|(}!AO9X)zʈ0$sD ; x9E\tvn,/!4zvl&(is>Ja(KfI!OМT b1(GvhO+qƗ>D1RH JE3HD2A)ݦ(X(IQ4L;N}[Ǯ{sb63^B !ֳ(n3\8>6ht^fpZ}L";Av"7҄dȄ@m p8[QYȜ'2m>OEsx CO}6'$Kƍ(k;JIBr8ϳ~Ԫ CBRDеE d-B6^?GpgYפ~!}r>_캕Ru;9>Ƭ㞭y]qjva=C1$hD&^:Wh 5-1iMư_Qe1Iap~y"X*((4-!W{%*I3 fO2Y!*cgWzX%#5  lËv#v ޘTN8Ԣ!聛F0ѦlHiu9U~)7U xv_V8. ۼə#BJ$ykT# !(oUmFB7|^ 򞩗bvEDTTTAAUQ&Ng. 4 jnW+'Q8q6MwEg8C()*0" h@ TrqJ( J)6E˗m5{m޸Qۖ6\λIQEDC3 t:Aci\:s]rSSӥ;}G\m}:ݔ+vr- C0Rj"u#6V&o0]vpk0'ϧ| U>rA (ҭlMWhY"(0q*;]o߼ &iVt mt_>"'IbI>AeL\^:x/a%}{!DAEW@S/ay?7w<2 PNx5A0HOcHپu ̫_PQgV9h*GK7;@$9R=k&dQ ?r^&fQi0ys;D 0}hY9r4qк@} Uq3w<?dglz[M[gL $xFNwtU(*} g[S>׊\!"ZБ#I .P| [hǡ/4{u:?5%2}Pެȩ7L<aDh:|yx3K/M<0@=o/ [5& A wJSp4PVmA"ݜ1(gYL%Mwٕ1<2t4 Ļ~TT-|IIQ<în){-Oyμ7[mz~댉m ӱ'5o-ק?b}⑞ r<9x¸F?:}>{ܒDg)^v7ѳ#iG(7nML͍JbdVi"on/CcyG&뉝- \eT-ۤjnt'?i-]QX#zEePPVghY[e#T+/ջy}63Z;ݗ҇h,Ï_wR;4H;2MC~3{/}L7m}՜a葰v4=%,ϐDldau:ck XTZT!g 8O%aTRCXj\dkڃ 10(;> aujp&NFumj BYWcX6dѴX+6O en$ _&sߣAjmT&kQj' :2(]]X/*1>(+-_-M,{]a;Q2$RMp FY_O˔gœmYىɭ(,C}:{ 8O?R\uPF [0a#k磢ZD`\JFׁV:bvav+0vr0%Z^ն80 a-xP[$3H" JX³CP.dOv{$PO4A:3)k)^3"8anuЮ٘W{8@y*6pX  QXsַFЀZV[cN;>r܊Ե"0#գ "Cf5ɯyol|} ֵ[`7^J.cA7Mf/Frz#m+fd+b 蟣˯tra_It. 
iaYMNdmFo~h96FשCmYc&yhaAo.(3&x>Ù!~fd$9rH {{IL:M(Dpx(@I-Nܯp8ϻ=;{e]xmaU"cBW; uj#jn(i7XM_(۶׽P!Y`9 +MhrQt9*׸"-k=K(%^x]C*ǟo׼ηKJ}DYeaƨu҂=b!nj;fv LSOO8N?oINOY>#9h%5&q:5{hVա hQϯ{NCPrׄ'-L~ËKj轢EAG!ĉd1I2Nkq\!l6|vuSzsKa٩Ţ+;U%biJ\:f u PI$$E+ޢ:m iZ ݩ6D6lHF}dBDE:d̫ձTi=ƕS]K(r$<әlqN}L$?i?񌏻Ҿ!qmk?>?GʑhJhLqi;V1P3#+XW$I*&=0|q É?aTa,I}@Lмpi _H=1ATy)m]Z;ܳK>,'UKaHN1ԫ3:vṪ@ uZV&eďyU}0iȑa+Y_uѭZT#m0q7W1沌xݹ'/NvVʠO֏@ЗIfR&ga2/|OUq'k4XDolQUi!kw7=}-ǜ+-vZ-Us\9s}gVzR='{Wt!qn痧#ЇfgՏB="Z%DEE؟82d*d)r|P;dD`+P_5ADNfv듇R=xlрVL^ZG ih{7x)UPrG*$&r#&M:78B$pFOTI\҄Of$mV#Mɼ 0!3$t;`Qk$Y =a~'02cGi##0B+#|< GO4m7 V?qb'Ԡ :x}I0S'+5PAM޺}왣r,Ř*a-V Z5EvW5 hvH7իɨ*W~ 2`I"pe>_RXF]ٱ韧Pٮ!)!}N @Q Oώ\\S҈PEx:#lM@ox:0~ʙNsE U g?!vl{/$um"9|D5U"TĿZI͎~$;?2+5 ^i29id"pK{Ȫ'g{&!/bb?-xv@j$q1{S2:|(ty=Se p6`ZA=<5Q9Ia AfX7U/jוּ $;tF@K*͊0P# ?,Ϫ!HM荰?GI5ޗ@O2=b:'g%r0s!$,vt>!b8f+<ѓFjsGƷ>TZa#, $(#m`F6ׯy$8C5d]x%&pwhxM-KmM$IZ+%Y2YƔ*^N"-\r׷fa[4l7+n:c&3"T Eܰ6SIF03|}[LXpR<3U‚mp}GnU3??YB6cz"Dr0nd2`dWJ[o錩ra $XZ[aׅG6l U{BۭPNOKU7DCel.(qB~ @$## ̚ӪY&ͤ?OcbI' 1NI>1#?LYѭ;&qq* aH9t!I&v lCëyuWo?v8]Su(3; &&jPjaɸn?䟹ӵQ0}d~<|NAQOȦ N sKq.tt"f^s%+lK,q:BBAPf3C_wH2~]-']WA2mDTUmH$U̷$(\џfsbD Mu3p+Ѧӏm!V F$ &OJ4r&aR5- .oUaabA(;q;ǵ_ UEb8@\_ Ä0x[.Gbpnn؎ <UUEUR5UEUUUUUUUUUUUUUMUHUUUUUUUHUSUTUUUUUUUUUUU_h'O2z2b:9WY_pC6IT#km(f>w^,Z$&h t8gMLoZs p J7 AyGS3; iӗ!m:Rǥ@9K}.d/1jƉ!tͬzшi"\9C&Gƴ|*As4D1Am&|n I{Y߄'3\"sr5]wy Nr.x@Kr3+H\@Q!H^o9{IkQ^)*,DōQs7aXNVH)=},-%6I ;Zog<"eP~H_rߴz2/s"FP]ero8 hr-J FE1w] rw]NN;2kL]藬,!"UDQAE4bFBq M f":k.ik^/[Cv[hH kNw݇VB82Ǡl.D=tȐIZkӱ}0ޏ^P$}na2AFzdL7}%B~m( EAlc{cAqagn=g1[B >nKk]`wzQ11BBLcIYyD FH>+f R2u &\V<8n$9T㙒CWmœ#V\㔶Ǣu@S(>F'*11LMçYmztzxVbX޼>|yMoux & 'Mi\yWlym 8A{fA+g.㥼DN\v(X_۝;ӃE]K8u'/i)3!kxJ1G󺐒* OD=͌n:.QE$DYC_cXtlyv-yW~3V3&oV]z_n.eK"C38쭢*ĉ~߸b M~S\w&%(P~J8PIު<jMʏhsMKHB}2{GLF~^G)" kXJgo]6CJvkbX{"D~i0)KhO J5:U :퍂b-mgqq<{S?wެ Y֗sYv7>>:aOGv *תo vwS #iRʥ(oP? KVoSl0TS'Bo9FiT6Z^ujhanl$t ﳦwDu X^伙TUEQ5EUEˆC =fř-L8lRĐc>Ս|SF7#aqm;o;[_ud're~}WՑ!u[claF瀏 M{5*QcdTÚN6PDBy #YlŠxT5ľԇFtnC Xi|ꗭҧJ ݧ8b5?υ]0GǕk( JP=!V֌ʮ1i,EQq G6M +y>xCv9xKOAnpƹÖ D} 6CRy}Y`<<:#3"#E:.C]t]U>DOn7euE}mɸ..:: dXFPDrs7; XѵiX[^?2ASd޴l Iȕ C9#fqT}/S7L)~m55%BOy?em(Q K>\4-(Kl/;?=勿R0cP{,S 2>36Tiۢ]dⷊ χFRm6=Lll=%(7 8ء։0Dg 7`1_ntDYo ^@E*HLQd*l )N&lxy+pvwtKp?+ U<;Z9OΜ^?8F!j1)IXxgB%qvPcdqHk vg59֤r~5"V*(QB~-_};UUE_jy "}uǧ1t&ș"cӖ&SΙ%/nՋ<]#F&c~NXPθz3Kʒ!yZà#Yh]-b!R4+ӳߴ`33 5Y0ms(:2B,zKkRioܒ竊Pf5~Q܈2ׂ!y`. ($MWTki BoLI<,[2&:i!̡m(r5-FOݕ1tJ{Ƕ=ɏLÅ):bfx*OTmj݂p@ b<"-X??PxzgD.1$6%B87s4@9צo 9{z|J!wro\n  &f:Ꜿ[HlABh_\f=  &6);L^!>W&ڌ=B#!3^WSv w~9{÷S -Tu3PL5t{ʬ+*'g%’ (("%xd]ǎ#Jl'(*yz:`p~TNDƥ<#O"Qu!<ގ1닶Cx-Pm JL7f(1DOK"4 Q(Qfr@n.w yl89Y AD`d!bKؖs9| ^Q +^Obk;4v'gΫg(Fz!xR̨_glAъPMe*pyxu KU !\wx> PҋzúE2=%TOcj5B D5;5'BQn sI0%'*Zfoh[dsHZ&¬ŭdz38Y8VN&fG9gHчvo^G~$[B Fl] #n&jA9dw}mH 9 >ݶk9d'|sv m~Mt'=={,Dgm٘LN9]I&y!6q*z#vM( `~N1ڋaOlPs=' "+Xbl+ǘThZudAAʯ{vz{9 lj̃is.XXWS1əAOЙ2jY+{0ѥCPjS߬L 3FԚ)03ϛbNj>t>P E&^ sQ^+"?b \)zm:JGFI!3ZUIi+[ԭCDE(&5x!' BDmޤ3 $xxnc\II 2HHn~i|PM vPә.Yjk sD ͒I⇲[I.VnW׷MvHp]Gw XCK}9zY5 j(`Rؒľ:z"C5Rm4놓E@l}}QIT4HًưHZkȳ R4>##Y{h} MG~x~{2j@Lۡ$xg' y!f)L/J;p6.1SGGi1!"PkPvV?3>t3QՀ')7şM[ٛ0~:جmzmCYO{6|)e6Φ$@ Ul[-2| D8}ktrp;`4i@SOڊ&P!<[Y5STAK%:$*D1:ZXzK U27GEX@sddծ )3iҵFۯo8qƙЄvBٚt=p -&gm3ݑ(a%PHt8du8~E#B~X|1`f #{tmfj6a6EtNծ`X{_ZpMuLra>ckQ vg+1dp髌&&w8B# ]*QeFq;hYqV-**4f֘Ռb%~뭅kYeU3;vF!@BMCGnǛѹ5VmıBR!$$9Y55jiƴq.*71Ћ¹WƬbVo噟%Pwr5D}iSzY='pag:v~~gUDTPw*m4 4SJvgo?a8`o B`KHg/ɽ VZ"1 1 up)^3eJx8 Uf(+@̊cD2=- i@r$#} :ýwQMwubݿZAMT3TЈm;Ea_.Ӯ˜[>")?w$!4b9 Q#oc-oҲUMLp65hAD xz]PoEXZuUmaEk\h4 fgʻ9pO{8 "ޛMe]aq21NXqy BJ;kD^FڈG} ZۣÃ7lv,; M$*]*ڪ(gI7|5"-Anhdde‘Qi ˾! 
nb5u&v>qh?ʨ7fz3A FRa"yյġtUL̎7xLϜrP1u 3ab56_mV̌4;{@@|GӃaPl.iD"&ه=|IE<'S???Fˍ[9<-ͯI(^e~WV=9X{#eDr)䣑4pgb|ۿNFȢLZA H@,2ݸopy 7aȱa(̜lju_mccQG&ee>85nԐ2Mcc&" XF0qr9Χ!-dũXap9װ"g6'!uGqg0i~C'_UpvT*4Ci;]k5]fɅ(a(A D G"wʼf˶dpQ8xGfƆs.eMZ"q%\hp9dC./M1 ,Xfg~ɲu+٪>̛/AwtǫdG/+@*3 Y4耗$&'!E'8x?Y|6k4Ƅ&MÇc$Va k~|:fTm{ JE:1FlaJ~/cF#FF JA0k6Vy`Io5ؗ} ۘ% g:Az?J:tz? &3ᨍҪ#e7fA1,$+ܩvoJ )e:~,'?Ȭ@#kj yÚ&@'$uc9L}]ׁĚ> Pdr_%õ'¡?Sj8Ɗ8a„ wğm,E&y}p1q AsBGIic `>VĄSFcWY+)FZT1T.t0W`tk^Tb$42d$B Y mυ)ۖv,3!& ic.38y sAiۖBED[^Bi;ȓNQ5;`jY6-r" s|8;XmC2O!gSqn>NprUی]t@y;VqB'$!!2C_xpЗIBXwvyҡT*ziɦ8`&-GuOZ&KI\/9`C)9g X"ʃP&-a<10qpzT٫up6kMX^ce#Hg8Hw!mp 댪y W5)X53 7|k,,Yʊ ӡ 5}">+Jf^j۪$A Fj @-"G`B9Ṷt;HLSe\!A#mko\͟oac>6Z6}D<ɱ<[?ڽXJ5Bх?.t8~z,W<ޏ.Znҭ$ ~Xp48Zn$6u6I*khgmFЮg<ݍra6N Ld:ӋqC> ܮ ^mQlk$:,d3=왪32zLU ]>D+l$zbB#GUᇠHc1gd u@Pbz]q/B9?hOUnD<(`/+瑹8\#Cڴ=4"L(nnu mL)4YXx7b9"MM>@g$xf~W;a"gJ^t .LTUMnMgZADT-̺DB1(4L ]GfMΔ-]h{cWnuխ: q=9I͛gp[lDam|ƫ?M_eGe].-~ ubC.+XOY#*| J~y&e7+:}34Em'$\З;[$sgբO B#4\ئH]3P^t2kI{;5D+Tos-5lTW!pJk ZَnZⓌ?GfEy;2 ^d=pOTI]y& 7xVIv+*ss&r`XYJL R84HF.q1싘B鏆RMW# rV){RMvvRvF6$W`JC\^m òH;37&pWDk*U:a3!qx=r)dF 4 gIJ!笧b kzudNy:WtkOtp1eTs76#2C"897e/=$t5B2dԳhݷ2M-Zgf &S1 G-s4͙Y}pW{@D;ӛJy2Bb2aA^Y2[ )YϾ'wZ ppT(* J=NhF w Ҍ d"% KZC>:|CV*18MحUh_:!_D5̊!Vƅm{a|E3b^(H.~·mPL{}sR.$gt=8Igf??g@U=_&f $8PDyf]RhTlm>5Y[ŋaTT: 8^Bo{0W -H h"k}&A gBдLl,!41bֵ"pCifGID6>QىETMEUMTMRMPRPT4TH5TSI0JTRRUERL$EKTAS-QDE-IURRPIUESBUKEDBHHgmFe! 1uX?mw4%L?i=Mijv/ovh&]ae1B~(B.L#sKx|B3:=7][ N㻼} zI7wK㯩FL L.C{%|NA/: fJȸT95jf@csTꛌi :#JzxrtϳSoe _ii#rDrR`i6 y1/1y& ׺sƉt&)"7,f[aKtp窡=e㳌/;>9ݒ*-xcI;~N;(*D?aUg%YAPay9ب#[|I( n0^OaU4[b_WUu׭wLAGV_K_:Nd}:YD{pgUK\ǢN6hĞJ8br7kѾ&mӲi(Yڊmqx6>g"xY&ٸ59l^1u۾zlѮo_3dyz >~qWC/j* soa؆]wÂ+CZv&/ldLHۛk<:2CO5 7/vXiocv`FYt%O(SYQU DxGчPq+G$Y m 1=EE^O^0ԉ:~y|Qc?g?1BD{s]⬯MFټwﭝos:7њFCKCx8)q€$n[(D˜< {Q\pVPԳKU5%ĸb"Ҵci9 ܫ.KXaQ)̮^59rqkIǩ/j(A;?X+x<4|)&'šl._'GcW\ۑȬg0iO_ore#N;xp~'hx/gNETWeg+W©u'uWS#^T І۶\,%>{]|]fFom5ⴰ+{k6hلYc)>ϲph |d<78$}/7"v?I0uJz4JL}:5Y#&fwl2EN}LCEksKy*$+yHu?> 6d>#'{B #/w[*%6=⻮ 0z"%d)4[H%1d;mq٢唼3"s%')"f̖$gl-Cr'"}{/Q|3.X)'q9rgcsu"̲t{I׮ӽTnAk"-Inlqu=r4*xMo-,<5{uŽ~N6 ,MOwWH#7_ڲrwӎ9s -e.G G,KM4~wۣ"aSClkC8y]V*b3NT @6ϳ2YdL_|.ҜaQL~H>`+v'qcC2ױv]%ow}lߙAPQCHR%*'PU D@$onbErz4EƢ6jU2Id ~/dw/z5䐎0f;^"nʟoflf?nc"HЋDJ1 JQAS)Բ;E"[!x? <Fd쉇yin ƻ=xnc 20v3=!-kZgoق  H@;mGrP?f/$䀦Ȱq[h^t?$1yRH"F" F (H JD!@j_g;=O$z7uL uBjTR!s`V`e ֞?iH@$)<!?{n}٫ay]Ǒc&jАބYc'rdA|d []nfod{|$ܦ>@غ῿S\1z/9nNjn@ޮBLqTC:cbuim lC`P>WWm޾3q8n4gH/T<XI)"ja`bV$(J c K?h2f X^b$)RH@SUS3"2S=9P$4mnl3,@ EK#AJR4DĝcT42wh"IJbB"嘆i6:%P*W?w [7S?p_` /|zcWpȞl$벇Tc`l!  $IAHB+NJ'SE ?}/cߎAx}ED,*H J+BHIODfdZ&@`*WfQP҂Eߵ1r v}!քK7򻍛v~# d!:;9Ζǹ7s1R7 f tXaul0PwsEG\lV f/8ؔRPM48*" Yr%gZ֊~/X$*SrloE|4jM& *Vx{b!{I?~@}OD$_YSRD9[Z'_69މE(CZ.8 ?o[VUR_q?I{؋S֍fBHzRIZuyQ?WWѾL*=bSAi*_{Z;4Oܱ+ ^Yeم(YT;tD=p4h̏e{,mmer,la *wCetp&s(#_J\Hۅ[f6JcS/'p$M0kV;x+~t-v+\HĈ=z=WvH[cG*aU'jbY 谖|+tk 8swo(l E^[J9C(+R3g/8eHx/a6_eh# +*{NY7iK3!_پ`)iV*S3*ןasٯ+>ϭA"#!< uC8dWtsL(G_zz,Hyο􉣇7Oϟ!g'-NkMLZJG=Y)# guTy'lH_ WZVy6٧LzD]jo`&!*Nʽ?Po1+keGhim# T8s9G`*mK08Ďxx)aSv0H"ATOP_&mh5DɯƘ0%!sbICUł*UZ`{ثdՒ&Y. 
~9,Tqp9tjT=IA.M"-яQyaY"9QV!:}m DD?Ѽg?j9t'}_.[,b>@̺0=(%bx#q&㳕ΑAԳ>m안MFjѰ+ƍj?6,9!:,w!)YScM-ay٫Ԭa=-6hu0Aj5ꝫ7:ٝ燆:-2)”_t?pOo<ͤ8ET$7_ ʉ|^ yR]BM%U}{ng:4-w{90aq?-lh՘HjII!2fn UZ*`VÙyBA%"a{ lH7j$Sg!&n(Um2ȟzgLْG+B(yhB'>΁PQL)L\4_&N!%G!pӚ.!-4PxOt~9mA1l]6r&9fnz[>n0E4XO {+ljm؊u<a?8| AJ2W֜7C$iG_[Z>CH942ɑP#M̙ D˪/@L=:Wa6đmL^(nL%:} [ݕ: BQ7ǡ[=cs"%V |4̜Jߧ.(xUgK-|/]t̅R(V <*fHTr!ؘZРڇD 0( Rb#bg4x4FU_7 c00AHa Ad؆8=-6Di~){=RImC~UXRVCwjOVF]04!F8%> ^-7hT/ѰG7"6O%>m ZX C@\8*y` Pd&lsO0);<01=f]LCҮeosl\tXб i5|BBD* F Ҟ;xPpy!'8Bovw{AŇЉd&U|8)X3 /I[|G?ndܩ Ayi=6i)^KЎs~@}lwلG!4N\3+_~i$lkKyYkk#TDrcpC%n mAdhI/!:n8$Æ],~ٺ{v0ӵ!Dk1tmlC1%U7Q;‚ͱ75Cө*֕b~!if U¿o0Bʙأ1XWw,J?2C4ݒ󟻦cHAv5 U4R($~U;98 47i.[/tR[&qR(p -kh(D*w&LGHF:I^w J\oJ!BUvөDjM'lvty]*wyg]Y\|UEn!}_NյN_ СPImpㄶԞQz]\" c1|L8-oY׆˗bQ('Ndjudw\2 jb4 땓u}ċo|L!"pGNv[cߢs#ZgWTk1ϛFyxePo8 6ͦIL-ڎ=ڌaaOeec0ŨTک6kLM9[m?uU(ǽTR"ԋWL=mhy{WlXݬ-hæE}:_t1:KN.#<3{|Jy% CR6妔jC=ӳy 'JUwJ;E#ϡvS5YMBdi2/s*zbl̥!ײkJ~ԜE}1^'TD|K|[rH@XrQp;B~lj*"=;~5Aym"ߤlkHML1F^e'T}~zåo9N~p|W1SOhE ֯̔>*(ыI9O!w1)(T./s]WKwcۏ^'^8ƙsjۺ[yԪҕJUcfF]SiW&tXGAQ\<; TQ(#i;/#7T6DȓwKRnwxNvv3T͓*m,|찔wq߶ٟ/<7e!oإH(NUUUUJ 'ӳLlW7G zeT-E#Jpqx}fhҮj/|f1X~m^`蛌~u\[xy/uڞwKV$ %O$ὣyXESF:MדׯGоzYƃoymUY'aK9e$Yg6I/S?B)nX_8BqͽyWӺ? {Ixřͳg])6UQGqu\RVO<>ݐ8nk;]bFܽQgU.YÇSDR^ƎX_Dƕ~ki<_] nb WKWp[rY|ʱYy憻%{\$٢OtD .~kWcդʄotjW_~/i#SqnvP)ۍWcf\l|"}B| |ʾ3]ü%ΆTf 3>xEu_O/B gD$fֿ'3-%ܪ_^Cq>a^q6k SFM Bt5^[ @I:pm hD-Da2ģ+u71M۷([] Zzk#ܷ~%s+h2<xN(DB>qզ]VC_!wwtjwg+Hw^^|'%&jr wTnO6ڇmڏ/s1i=rwUOU^ϖI~Vvium<Ѵ:lVX>Y?w[nى|nr@;ލDU^ n0][a(A_ߜX̿G\K; Mj|9guF1ORmE8CߟMJW\CW˱@CO>%yټ' X,(r1!eoURE>VEm#5\k)v?L5\IMՆL~wmc;η~δ70L*^xL)]v̐ybRD||ެ'R|FSo<{7IvvS^q ѪT,[ w`Y~E# HĭO'(e%(.]GGpҦ7rU5C;m++|7%Ը¾:$˜J>˚;#WM6K^#Ǎ!qjn_TozawNG~vo&SivM~+MM}=r7½z-8|džʧep׍z:,嗏Wl -^]guyP)M8z ;ߤX*~ka= SBonk'+׾\}4݆\eq2>?^rջ3rR%'"#"@q7y8{p*% j_|2Ox^򖬼vvN;˯U5z-ۥe]af%閝:Rw^ ʩ]ҡ]vAO>s$hk0Ni}sa˜\Vzm7Fp*YV z _ ky[]G'˗Lo n/{78JDu+nˋ177}O!n}>}|ãG4#(\}8C3)K@] rŎƺtI6oQ*}Լ6'AFMv3Q2ej-En>ޮ}YeCwј,mDUܺX ͫjGWXzGqwyYBel7m]%#qUfFF,ctrcd/9ݒ2_é%\>^'K ꬲ{ƚ?frVVl?#V?ML]dSTq&qk0êTtkYiBU\}yq0.0y;٠w5SNӽ]~9%V`J~FIA}óiqTmWp{[7[OL_t7>v={W-))#ҽ+uҜ^Y~Wۭ|W\ٳ\b=;q?3_<%zzldRrMOW*{}{gP^Jpt;zr gDuGCKHCtŭWyZ_-גVWo y\,^TO*yp6aa8RЦQv 8,ٕ[6Wѝh:J#˔uy0{z骣(wZ6OuPEež^]V&VCiه5eDV>=3~3;Q῔hI6C((<^ޢʍ;ldvFߨK>6T5rT{uIY]46l鍺l^-˖0w fsD *^| lK,6xm嶍5aduaNad(6ޗ llm?xf$&&h_9E+&GUكY=z)/!ŗ.XU'˳;cՕx yU|Nf;4D^\?/UYz< WFUgȳ~8-PZ;J̏gNʭ:Utm|!W)Oa bmGAYۮ7r,ޛ-z6`?;!Uʼe–TJS5?}[aNv%Kyٮj'=Uf݆YwF|L6Ƽ ,aΆYϪFNu.7e}#`f >ZTH t"QN}oVԵKU|{i{/ו-zYPYwaT}NTWy : ejW\S/mmb;vBVdc'9E%?/D?*:$T}2EHծܷG~.G-UKY.o _zO/!e}sS*i=-kNR[gvga*ɺY]bfeMWҐn 鳡(AgSpKΜ~;%N|8j߻;r'&C%:9Llp鿳p޼}ZhrEZnf:m"bGffo\YTo.qp:,rWzSJ_zkM|ZR~zSU`vZebߔ|z!)U\5_,7N#rЏ4Z'D~C*P6yl2E,S&,PC8\[gI-G{=+ ڬ(,— q rD&`*0a/HJ#:,h <,O%ABUU3B- @C !- MDKA1U$M5SA5 3P1!UTҦ:B$)( !gXB " Z@Zjʶ*X@"dhb "(j&""b "JB"Fh*X)&fb $Ii")$(MPQ4JPLST͠ PEAIL2UDQ0A!,1:CM I0DKCE13D P2EHDU5CCȕITLE+IEM)l8b J*4K:"!i^X$TRADMDGfJF$Qjfbbj"e DbX )hb B@4D@KTIĚB"H$d~4KT>("^|ȜтZXH!"FdB(Kn{pdu J_hM;.25IDAB}&SPyU," Ow߿a8^|k|Uinovs7*W[ 8㋺È'pA:0[f.4E,p{PHBKC)h W &Xr? 
:7aDo=;6fJT@ (FBL@D(?NV&t42 E4I CB?#xc"N\hNs 9 e*Sq"B: /by-)K $`%"X%iʝB (WB Cҁx uG_o@ An&>oXWY BMӉJ[FyqlMƒx&UrR8DRYvî9:\Bqp08յnTqB{?LcJYL\: VxO'N/`O,49DY1y1joݠk3[YA3>~f5"[vNY:2!Mz_i?dn(]CGfϪ;7Uy[ # Ꭲ{nn=\wD8w\5Z?V]8w+Gܾ,L7ǀ.$zSt!;q|ð<DY^&"*I# ZGH%3ݖZ C u檼x"U*v~m" K}=#侘ca^$G9}׉]?qV̚Fi:cJ#uQ$JxQC]Pзz;zo N?1`gyJ!9QEw`9\pꁪN˸N:kƿIB>Y;BQvKzos6>~NVz5E ImܷW]^:;B ~Ta{-؛~;3s9,`ͽv_dOWx~M@{:,'U] 6T@՘  8F>߯5͡R@C gd(j(pVpMU,kC=9hMd9 F0By5nT0}3n{BWbaF;%u1һ2`6Qeg~3Cf7tٺfdcȥUU {v'~ʸKΩb{n|jvekQbxxLn벏?=OmY²6oeD ]a,={N$.ÉYGw; %2 λ&r/c%Mkqݳz ?ܞArCoN58鐲ybn^D] 1pv3_@f+Tj*"ol3}<ֲFl,ϲJ#}| n- 4</<[;`Aܘa QU(å287cn{'h)VmL j@4}+=y4/[ˬcwdeÐ5"DžЂ"xâRzRJa]aI;1SnޖV9 f}EW(a-gjn5uk`BI#>_GH1v8Z t]P3z&MkUU-~i?N/ qEm("kv..5Jz`xQ׽懚0+l>d] (afT+"q{;C{rCR QsW)3=.csNZoH6"(9`cfg[XacsСy| >x#7W${'D_-+{@KjC_O.23$1z+ mΘ&RdžX _f{(g`ѳ_fX}ܜXq$"D<,'Jbw, #a-#] zv=םƖkǛ*b&Ha*" -mTBr9e]<*,7 " X ț!,m %:gYX|;h`Ql@  6\wTMIQ}9DUräE 2^E| kƴm'cB誄I/[r;K =cEafπ_~ x2Zx71ݩ50z,WwK.7y1:) ͑u 5YzM7o1& V]%R7T>BH*4v;zDDpֳ7/uI)OX\>b}hvbtη$xj-en*ܼ 90xz۽DȞ^y7)iY5TV)\>\#ofw˫Vd[DK9wZ1.y]SzEcزP !'ok0i}#N==wodZta򦞦 ´e*jN c%қ3[]m#%YQ}TA2C5IyYZ&.XfδYZ{hzvM9.]枭m\l5'ʚuְed=Tn,4}Ѫxo e*)kT[^y95zn ;AӬU#%a%˹3"&MFwU7j4ӽ]Ck fxoS7O:ow։ޡ%W-fen֣mNdNINIZ{ȋ=jbfTT^nt!oWΈ$K[}#Z]쬗"/7dNi"!ngz*i5[:r7oeeSZUQfZWsQخv$Lʍիz˭oR0ԶȻyf=S4oQ&NoY>n#(\'ZykU)ufݐn\5KrsstW'5UDnUVz͖(xYRo3+hʛ pTc8AUIҹWT")M9g~'O?}c/a ꏉ_nC 3DaJQ(Ap2ā1WȆ /ۂ/)BRPV) BHDSJbQ*$*Aoa*G!os9?kfT]Woܠ R_4?м-@̐@%M!2fRf hJ@)("id` be _^:QZjY$$" &XIaHJBH$a EIP ICU?d(8S2ޤ?H!8m7@sGZ L&+? !W C`]`cYd"B.Q:N_>eLkZҳ-ͼ5oټCٳKȍPv lK |0y!Gׇ y&R+}9:#1evͦ %}:"tr+&eR-Qpd˥u͖f<%0=`gNBh񵁎1gv5=67d7N <:w3'5?]Gϙ$$O A1O> &\áj8ǷkTQǁFBůmD,ͷʖ Tëc{<{ñS&(LzR y&8XI*1 $H:9ٍ#"1*?+>IsXԡ5Q6a#/8Cl\ByMZ3"^AkrY`(+]K؍1롈䳴v$b` b$ɲ-wonc ض{I!9MQSj{w;&>f Mסpuqs6-нL1h+>}|EORYTǰ` U9֛GCFɹXkNc!ɗ S SY6u:Zza |EΡt=sUc $Rl$ vb Ƽ;؎Z=ЀqP;Np\5ԁWEb{K:V^aFY<[Z_c[>i g`OzBgD<@.{>YAslU*_5ʩx7_@@7ɃSݏ2a; z4m"ZXQ\z1T@;@b &'t/J3Yc<zCnu yBư*,nR-h'˅Oj0 dtY-vfnu?h=E[GuwNwPr}"أ&݃YRWu13;s^LzPQ8^3v8j+6v'f hFB@1؄0+`30 W8QGxVvoM,vpdsR!c ȀoJn1FM{HV]~?|CGS[D9%T_ݯO7<| }EMJ(y,TYNIC͔asQR+RtW0L|!\5Oikp<9IJk#YyG0z';|gC3Pr(Vg *gة4x jM#ء}k I}$Ye/QlƯUi9^m# 0  H98URm~|uI[;-qb撕$ 0ktu֙촷w5iɉ

9޸?>Uy)Mw=wU~R]귮wz>y1u}~;:]]s/긭{z+^:^bds^Tx{<ȖC8 s6!zH>g/}Iaw(;8>Ξx=cG/!'T&?}lg2+j 4[ ]q D@Q`T B0p@b*"􌬕#*Uݺ3'74ܰҗk -yj]V-_7k9Ly{G8r/K& J>]ӥwaΘfw*>1/Iu05 ۻaj,XaQ4/ƅA1$^ NP|S9-fb%ADanl Hi-l!lˮ]}:0җ=HˉdTaޥ8:e|okz뮟:Hu躂gZ=xsU8O~j8F+:1vpJ^z9(ΈMEyBĜ{}k9_[׷{}_¼Poޮz&K@#rQVTK@[lzܞv{NRG۞3{%OÒ%7ۯIi["lMrґds{ ։O|9B9e|!`Kl#Mɩ΅5Ma8a}lݍUtI5W֎٥g ܣ ԥµr EXlg l100Q&` F{,jnˠD B,";y-?tC}؊jK&y4@&smmSH؟{%SeOI ?f*̛@oB@ ֋!oHGC>܋5H`%#HE6aIQE -PUS6S5A1+"E,TU[fa "C\í 2h BK'm%H5OS/F P<у2IPjdªMX;Z[-`) 4"9F9(pj_7}cĎ|`fd6S2R )NeZ 9,m&a t#*!` >o=d0Y!yfGWo|}_ oҿ}=~m_|6RWʥrƘeYdTrvd8aTVSE+—)eYHTeQwRxE;b>@@5?=Ba48K!B""""(((""""""""""""""""""""""""""(DDDE@z/P. ?`Ds0@/""8A#<` $8j]A0/ Pa1YL;@vßBDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDQEQEDDDQDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD!BPH$Ь5 7  `ZF4Аl:tr p`A dTa/ '* >p]| a>h@=P5 apBjp\rłG6C אN ]ew!rI @Szh^8Y=7! pv,6_)9 $lw8U}MCuUQΒ!n#(?&yqI`1mOLWP>Sc,@?믓gT; |u`P:<1`}|!ň LI:dp=[WÕ<*]t~ U ; :J &.v`BSӏ?L~]G܁CPl GN; [qJyE"ڧ{ӡ@{}Hxaq&7/}׎z[ggCe/勬ʊbCM7Tek5JEZ,i]΃ dH^Kq1{=n 3yw91>bEoP/%ӺīWB瞮 EdAvzk˧oUM6uo^+awviWigIOyuѨ{~3Wwwx}bv4)O "A#݀H% "sn)$+^+wWy7HH~X?gpf=qC?@8eO{ \_TNi\HGR&Jl4L5y:s-uΑ'zG;T2>eU|7_vrʬ}KV!uVfU~f0bZ3D2 ΀! 3AF蜈8DzͳACOPhšF!d@,fJJ*V4I!uiJ$tLQ`~]84A,ƶ꺃d(q D |^^ 1Fڡݻ>*ΗǻlZj$dL־) lhkôCrKYqSܝ&EInL<1ۄ* 22y0Ow9PҾl- w ^geF]ҫF.3ZF:Y^Cu2 SC>m@!%XD,7E1LeH8`j2,A``z#떑(J)1;s:wNL\kؑuɾo2Bh㸬R='qh|T}$?CEVa&@zOqv`~Xn<`}HlhQڐo%3df2j\O;D'wh91;mjjz.8Њ&N`&qn91OT)0nSU 2>_`$$__cQ} THȐ&$OdLRRSCAJSشJQ$eGM?àA@פT񔡈())HU@k_{ UeE)bO>bxK&tGGS$;6Z$iYl/0m]Mݨx,EuKIKL$$$$L;Su}|jl%)wDxMd]SS Iu,@Ph @Nv%-@!ҹ :!eǷ\/ȑE$j +y !Jb(5 dAwU\| q07%WZ S__Y-Tu3.THHFn1IƵhj- x7!QuT&<꭮Wk/>G_@Bd)==2z@ GJƊ`t o:fJp#o3mkl/ۅ cAJ월hI纖<- l[EN"dqQW3C.@܌śpYK@D#,d66.!((ʣ~uҞJp#PI!_.u:ڙ,'Wo{$9}WNC⑨'QhD)QVz&&Mw'i^H I 9b'^Փn#1]*-Y&{A@x}M:#!pHńakwӖ8K_sǚ,1!:rK;m+Z繩~ T 1e5Ì`EkÎ7ӔEdx=4QE-~Ǥ)(H&5J4`PQH,P?)("`=m;$Q $e%„yl>Wl"$g8&MDMQhgvmK'OKfY˯wΪߏˋ(OliBiٔH!{8m٪=PxA>z9S1s5osMmTK$?]}AO]1p~uAFmJ5eݧB4'Xy-Wl>wIa5VQ2U,5J ouf0, 5я5g2c>J6l>g[oI T#x!Ęre ~A R94C?V|~]!CUi~*fxI f4Qrsm.YiTc\?m7?/us"s{V;˩5%fqgG{O0^I '89D$Z[hMxg"y FJRvN3iRm~37iXkC:$ϵ g޺gtDAeJI,\~ +7x$Y {`M|p!>p<vC¹""!W ~=?(pBtO>Hv2ib0j$Kkx+ jE^L<81͌d CX7 !!44L*C?myZ˒oX? 
qNV2bB0|ߟӭ)IIQh sGF0 (:u6&a`dȺb #-Ig48'.]1|iF;.<0"sFd|KaX{UgE/\͖יlm'2 Y d5 d#׵5QR{5yY]uD J.!yDz._.+Ikވ 7Bz.JyvT0!O;7u* pus05)(;3iC9'BC\4 5CT1kUC=[4|=T}ǘ jޫž1"7$%j7%%n^q9|Fҍ/wq7NF'T!UΦXbǖug՜b(y@wf¬;6Ϊ/wt(C闏uU|C"yUc-Qg " Mk]5]jמڵZ0AAr eRWl0BﴄPq{6BͣEI`!J8BRH]q`F}߆r3 Gt*O{gF:@ѨTScdžiySɸJ(t~* P铐M jt h1XUQGHH[|\Y2&R%&kgm!tX ߦ&'Q8J7%w.DkNVVJ:ˍNfޞ |N'Y[{FDDFa$gQ9n,*~%r8A.4ڽ\RzUժ_9N|HU}r#s#~{ԝ$ƨE63xoy4na9ïΫ∾=5S}Ukۚuګv*..a,Do׊}~GG.Kw[̟Axm׏MǶ=Mע_aOfR־=D$DUw);'ʩ- pY5~T9'_Mth `2'dcgk3mzC3V"޻aUYjv!H$%N;ǻBU6L[B!KpnMTOCo!_D HiFSEu5*3!(zgOͺ!JjxV"?BAfg?2hѦ;=&/S+.ʺȬbޮA)QN88ɛ\n2asoƔثaM'.NTOqs؅1g)>[]UT1q9nPb |dsojx\GNRwPŢ[JW& jq)5d.gKV´syC.v9ΩIYI1^6o۷>=*/O9qU놓V-pn6kVvThĻv7 RuM< >!Bҭ?2 ~\tu˱T^ c38`x[YȔ5jo=[7&b7spvqXJ[^]( nAجӨa'3 {E[/=%)Q|&GcyTGCncn585^ s/9P%qb޲dϵ*Dų]s1b!JuUU~qrx "*3^Uc HI 2YןSt_ :ȖjsJzTȥucE5|OkL{B.Dz0RͯuImV,xU8y(r2Q~܆c~A߶0\vlwj0a+ B+ܞw[G S=yCH,xS 3Ჹd'YߕUApC5BqQa j0S;:KU/8ۥ凓{~sŽ7~y+{==|y{7KQZ\u?ȯOxJ^Zm[oi\uӮu;{$CIZiAs!DO((qJM2pE'"1fBAkoW2Uьb," 1sJhMP93'C5'S|$:d=7k7ϧ;B_f@X$2=N((u@ IaWl^tO;$OkR6kzv`N|׊1 6"@+zTGUُY@|L0 fz0]&YWpXZ[;MS] _K$+ҥrZOӊcS=.5 NVumU5;;ӭNn>5|: e*/reƋѥuwJ(~4Lxó_Nx~FNs1="nN'=L=^i?lRcƣtl_Sl`vc0WHs9;qÊFPv+VW_{}}}}2a:iX1dW 4Vj{jx*tH!~Q3ѕɾ%߭9Lqシq߼eKm[51}5ƆagdR갆r@(rҏ<)މGwO~'Dju|q8^;G@"B8ɟۘ}P(..usGIYl%ߍo"H 2a3;5%$&a 7n0,#Sx>ꭶCK^;HB (l5EUf8NkهM4V$|!_9 a~e˱$<5{͜nR1 6=ח9J^jr^fz;χ_+edb~֤5j)j0k2a$$% p;ǦCKcMSKזjnxf:=/lzfqN68)9Y I7HJE<$A׺C+sچuѨ% $: y$2Nn +RDG[֪+X/2O<㜧y\AǬ{\9tUFOY^=dVษ},F-dzQJ+,վY ` Iنgc,MԮp?ҽ煪9ϼ.]~Jmͼws+~ųFv`z|򻘜ܟۘ&e[a=uT9VyOl~fhٞ1"%EM?Zti*N_gpV4cwU_|cq&umLwJkgٹ'T S4gUٱ0kZW*[ba LVꞩ|CnʫgJeO ejVQl}vikI3W:IVT|{s(`ע.*XnuY|Wi rc)疬!qf !$ 1^B_Vk~^p,^QQZähԣj0/-o#$WPڡ/da(^{> vཫJJZ_Nvdj~MR.$N/ugې\Zq%}oaV$wq9rˏq/;^ZHFO)foL^ѢN+gOdkߡuUʧUTB>k,nPUvٳɺjl:lY4%}&'~9*~alm̪4vfZJipvL?LV&ɍlN"ILc|L;ouћx%M׳Dzޏ,aI;qd&E]Q璴vUy}|t܍]LYܾQwԽFݳy9+Ki<8ܥ Is8va>'a!:"b:W6% KmYx$:u5!^C[\R,+;gï-Z_k(|a|No )j }Clp, !ff? Ŀ왿܂}[{*O[a^9³^pkd!Ջ  'w?o<:ygoCwM㇞̑kwk^<p@Qpz/_nJi@!0al2! $R/'Kb'µirMjsR ?k6ܝKoWǹ ݛ cڤtrl;؎XW p=|CCqXDܣ:cz}Ù"&:ՑQ{ N=7j|r鏖|pz?˂ в9?=qo|,uX$4PQR#{2tl-IXc%2/Cm9Zp'{BMAOYsJ6[AZ=28 x߫-07!l Ϥb#2Vp^r8]Q.50"NuRW5j |Ҟ8֯Q *^ EM7$ڽc5@{^TAtA@K܃ΘCvΧR?HN(ծ ;nĿSgMzVmM=R$`gI$ ZPxJ! {jh;ytj ASုR C";fUa∠9WwC\OĐܶJQNצmF`}C`sŌ%Ny.IRiJMN'JKHX-o༞~ˠkG]n #LvMuG Ewi7mhK݂+M$jGFR+R5pwiDPqS:]]D =4Zj4>7|\΋C @<0q9 ˕'87%'z_Z{1>GԘG Ύ]{))ㄸ|RvM_Y)ծצ$rlㆥ U&jgez&uSY]n率'7y0á/#룖ίtpW"m0MrY1:aa¨UŞ1 Gh=< ͍Nz־۠WOLuB;,hͬpˆj!7W /xK'g|*sfT ,v,6ʹTGפ)o-󨛦5 J9dj#.\r~ܚ5zԱ龷%/Zc.OA?WUsݶ׻8$iMt6rTeB[,b*"Eߥ鋵jXFͥUj-νf{.>*'$]vʪ3,_IW]TDpTR.٬*EV`DN[!ꪣ!LyzgEU3W_U%.הxYU]I .&͘eTnO.MMI*J|%-_vd碧pUTʩWPaƺ*wvLKNMrbi2lk#gkC^;qE7cfUV]7MLc?CڢoO-r50"o R-C]&)'-eNE]tDp#(A"#=U[{hR*S%0Vnu26פ7JS[.YUxhlM<4Z*;Уr ;pGB&GdXJ:&f(E{S0]tǷ䶪[4پ>pxku1l聥KYdw0lDjK:;]f:\Bƞ{FqR+Rp\]5}5kM#!lp*ǡ91D>iX=BPr¹F3IMdaK!A wn0X`p3ˤC|uArA_l(-|Iwc(,j]浚:UmΒ}o"(L۝"]f{hW_o='eUWysAWoKT);=4򖔥C ) !7q:H ;kTn#(kgkDX eA^0# %}n$CZ ׊35/EsЫ975{O#ݔr MG!v>oi@8E~c" *r17 =b-FDf>)6h晾tk6j$ɼ *#~)jK' 0ޠKqTҾzɕ4&گMڥk6\% `l8sm=a6]6R{jb?N^dI>L{>h T!!݋eJaL=Uzwz' *Ǯu|ym0W/43AǷzz1z3nJ3GD1L@&joWvʐ]6ke9Bu5Cerw׆ҞCMn뮚\RPuᾲ 򭋽Wc"ո4^.J&V`O> ]\ؘlFͺ j6&#y 4 TT@'MԷA;?T_u2-MJF=\n D& dl{?.}r!ʢ;'8rճam{WǽY¬4l b/F(*MЧh"γ)}xLR.(VՋ؟!kՅ2)"jΌ31L%V,O %XTok&=֖SOG#!KOJo -ض G#9[9ƮDu=}Z*b5"UJ7ʪ7]eSUAGaclE'>.prWBB p8 %=h@Z\Z8Y̥VLX#ᩜ)/D.աzKQ!/vS.6rr]4]َ ~VT 6/ZG6xH9gt[t BιghvB9oCߍoƳnOt PHRP AD @PA H30)+K$h ,94=@gv2;B~A2鏟bq'??Ă0D]UNr+F 9iG! (>?㒁s\! 
u K D%)ub2֕P{;'f &dt2w\ݗN%J5Lj<8GJ/>b'CmwC8FrGTSQ1CS[ۨgT c)q !行Sm3ؤicsuT #M4dRN'L,EV'J^  'SuC'8x`cbStBP:-xFYAF#Uy7 "Sw0DrS,;>_G?CȑW꿯3J@9٨Й OBH""X@‹ &#>L7WE"@A9C#ϏT5@Tx1E*;ye,߾),'9| *BE?Ta{>U?u"!2)x~ srX?8$'z 2,S<_SWێM~wv?ߌI@PvCPaQ)=kWtQhd:}(x0`*l|z@lF9i twrT]?q2Od'y}Ͽa獾`zu;SBDj׉Jȴ62J C ?#BD")h;C=*D )@y:cO&/ә}Z KNI/{B@E;Pr-h ^sQ;YF<;Vjpli#̄$X=д$H$ \շIj7]R˿qE3qep0{T`)#yvM^arJ* 8v1]}qg[98'2Rl{B髽z5_=U#|R`@`utE93$}ZWr:#EsjH0(:\MT9ZSGa-㎣y{`x SM([)t҇<(( ;✽ (6,*{KSȓ-(`q+"!7@Ÿ'd9,OHzAG@&d ޏAVgf'ǫAF/o7-\)N<sV[~~ nog2k d0%AЄ$y\b˓UXp¹:'gյw<(~zz}ܞ4 F XfuRh$פLU d f FDA6^q4VY8#Fω jx<8{ +B 7uzH` pcؕ^uǵ":.IP+@r[.y K'0zT >Bjc;UX_)E32Ĭ͈L4m43I'1D<\G? TzLvKgt  { 1 a D(`! - &lM$tjbcppaB;CN& y@<{6v1?n*3eiKRuNI#я=LJRz/nVH;YNG6)*daBvpf ~ƈ,gךn:rpٓ ب#4CbSOzn:DX|HJ*+ȏH ?pb^Sտ4_AfJ' 6lޝz|sqϢéIWWL1"Y)PD#Q4tʒ;ϟ3N'v͡!~Nͨe|Êw:S:q+*N֚], ohr}<(a~# ƂjátX5L))f)ԓFu$0CtNf>6AY:f|v IQ@48~cr8HBUE^ hrG(u9FL+B,|HLWͺlS| ip o^ N'WK8@8jRM@RQ JP7XYam8 c0=8̄AD"EN PoQh6SA^`8 gb՟ON:\TQxBy@pe:H^O&.{|`{J$?D 8?^}B|݌"{ Mm Kj Pq#\I7 8JZ ,~njv#8@%$bp >qsJ=p seb>tJӜMCt֪T"!(ʜrNӐ&-%DA%!I06:3n Mhd!F[gSv!6Q=$ 4y$|7]=6ZaAa#5&wRxvpSa {ܾȫLTP'tƻ,\F=8< _gF* [)6ё sT8ƨI|qp!Ha4v>9&F/#--D @. l(q\A |2rJ QZPd[Pr* "CbqtB G YJJqx싰Bz…ͅBG<eV狴~QEu,ropR=Wq{?cC8<\ " GxRs4l:4b`2F8N ꧿BI`=ppov:F=MYc[~4S^sAWc$oŰxʞ ճ@Sĸ2j|aglҁDyus)H||{e$y& }M2t j@h?&@";ǭ< X3%!jP) *;߰p 0!>F_qhg2 Mj F~ót<A>z|b|M{`ӐaPѫMRjuĿ/ qiY?s4a @d-v90יÉ{0C衪C4ey[XC@΂1h4e.Wjԭ!&xkIӌz) /u6҈v4|{8J~]-Ob>mCx^á6*!ኽB4`y:Hn!x C? $AH>pؑOfINIqٙ ) oEu &KD1_.lu=Gi"d҆v4T{%m<ǃRR:)J:A\>Qb3)1)UzQ< k)~m^ѬhwtUP^-a? C1*I_v_WCǼ}2x!'!(:Hb;hJ@rdv͘.BVB8SC5+E:hѡ&j Ku}شScQ!$( W*iX_Pf#dYE]Gb)V7X/|D~V3m@WuHR<:~R`ց6I"y2N"ä̂ҐFzuO?geOǯD\u(ݤ=QsiNǰ&WZI$Py8);:.Gu>pyfΪ[j_p`=X q 퀽:ogo'dL|Kys<5Լ< (R*} 2,:@5 HT("hC;0r@'dj:'5@N*١Eb89ԣհߘl]{C *@,}!L?%}evv b1k"Jl,/N\z% H " P, m[u}H{؆` B!6AD@M",\!r.k`5r~@ 8*Γ4y}UAR/RǟΉ̀3[C:6BY$8mZ{ g3Gɜh%]ͧY9?tۉ(?/UUĂ<~R+XWcM PO>e<h[/P_<'mD5`{eC`OSN  %wZu=5u*֤6H"iBPK_caWQ3Pgpz\_G g} ö87u&aB؟) IDH[)7$-G_N2lZ|?wͺ{iu22RԨ<ӌ[=8Jc_&]*E-U&3EtЙqQ2%<#Ox~'t'=OvCDLC@|,*)"8OıS1xw_vEx(&V &KyExD6Ǡ4dz|'G;yxH099x9?$Z AOwFzl"50;`{,5O`$u;7ayb:6pA 0 'a%y@aE `t3>bX!h>'}>q_1>Z6|+XJq[W)z|& i~:F&x *).#1p'==ڗ);{ef};,IA|8L@^a+l.0DvJ%yB 1O's0|M9t$&}"fSF2p @ pPCe5P-ۿJmf=vb"I6Ay%X`дˆP࠙`'(ee+?A6v2S^tMTA( 1ۋl7l!ދVO!58x,b+Ӿ?>I 8@9?{iJٴXJ 6ffffbJPi kq!0?[(`ǫI#  qn']ձA:;t #/]6e*WMG]MJG([惟`)(/cLYAQ*ՃQfp㗩{,(,<Ȟz^8F0+BsK[+F45ih"׌Bp#9m 8R' ҧbŇ~%PАjzvEEh"FPj: :h(lPl=Ci(r0>"~= ?݇cS\ʒ\.{X~9'd tS^S<!dMÐu {o?;ׄABI`!)?\nCtp{֐f¹GBLLD3 /ư>/p}mmkؤyJs>[PےE ~0~ cp0œp(wGVBb".)4'ʐC" (膻i٣x7A$=)};r@A:j;P٥Tn,Q<A%߀p-2-f;zM45RU07I`ۧC*-t>2IӔḱ6z#Mu _;rIe\gH8IT'%'dWF9mG{*6y/8wd4'GM9١BL:$h;p9L nا̹'2nF_ ({9@B{@=~'!MLx .Ø8N9D&ێ4RUp|GP:9HQz wJBD™7@;vB[KͼӒ>/Q}aؑ99a:;pzc KT4CWvW\qȃ?|'.PM9PӉҘ-V-USCob6*Na4ts ㏉ ONF$&'PNzkA@O% ќ0x$u/5bqlm}( `gu# 1'"|i?`jPO/ aԈs|c5q:rN@_sST!]nڿG<ZwOs:5-'Q/x˪,VCpWb`p'1|JY"ZSqi0D_(kn rt7;sŒ}sU~OX9ghLavx9Cxt_uV3$Rk†9H̫ݰ|GuHpS N ;3Kt36{*̘+E@sRĆ(9FD|$w !j'q|o[I\2h=x>89ﶔn`TGu$N (>ygd& AH{r )ϳCOAUK gA}EzTFҢ P1Mg~HL7y ]**dhwB#lk3qyBgoOsƤoм^((F~L:xܡAoˈpY l9MM`B{I,Ȓx JFA'L'o#A} 7NmPȔ`I(^y: Nnkedk,7ATy”4f:6qtnlq3gkm;( J+`l: 9aPC0FvNɚ=>!i1qߖ`Q@+ى|շ4q67EJz=b#Ă([RDtnti涙kj6LHIA4,|8y3TPr<.|HSE<)Mc ~8m3SZrm q9,f ȆS>E0_;@誈C!@Xb}V֔&0zD'P5|&~0d`yVh=G5_9bP؇ABu))GY܊bP:!AX;v?\C"vΉ)fs-C9rO_zH0U4_(_S=xBhr<8&0Ov`SB y~pث%34kp4cɢW4;Ȉ Z:GIzCSƌ!q^a# `媄RwwS ÀA}38!r{!M59;d8H ƼPx% Kmty418X9$j԰$`y\%zE4cB4\"vlG1Hq✬eG1yTT(wn#ȗÿδAAE% CQQ;6-ú&}"H `K9C_>a8*W2BM2Rġ\i? y7|ufkw2a6gHwUBZ. wyE Ї ID@!ӤW0l3$&U8V`Pp(أ"yC:Pܢ O#ψr (ZJ]MXAU*|z^)Q- xbVKqA,'iv&'g-D D?Yb CF, ף~==vhA=^2Wgh"Fޖ$ҏlS _qS7;"ȦtVhch,;(S=dbvy! 
7!(߸(0r5.Oɠ;&PG1F&DyQdcH۫mo)~vq(6=}sW(P0&(śtY xp ͇Pw`M;X>2(t^aJ#MƦ?7 >vO 3ZzJOmF}X+Q<h䑉c[k^G=F.@0=ݍտ?0`P`۬d&f:͢)DMfK .#O~;>TKTT[B x>ēͤz=6oW\UG}I_K?";$9l5kx2C.QUT<(;j[7XIeFDOtٮ$l?`NVOE]la}gic9n{2O%ׯoy*R,?,m3PJN7FBXBHT1 ;tU Wo}KzL@xb1D46SݜHKNWx5(z4 Zy0#ѥpǣX-G/ n=O E 9|Όr2rНιeBq7:9pgǴǙN{lEsx?a0`WUKYUrb|bH.vޠW~&=p*}q3WdVȜ0*yeꝳ#~otu{T.uJ|'p~ߛ3rW7J?}:0?)"Q?$!NC@;&ޕ"Qo]pTl /ˢRe40=<w:m&O.w?{ ̙0;eyMgi*gf&_'^&|^Va(oR˔Zʦ2=̙+s-eFn|#Y6%oWӵA2&c mnL)xS8_k *E@S$38+d <= 0/"kI$'y$$ ?qQQ yљFCJR/5*HTtRЉ..?ȋG' U'}=G"us:"*T+޴'F }ryݙv\݊6OUE#vX#f Y/ST^&[t^ʩTYYBVQ"Kv~aݤ|?3tCwYk¨6+H74~ű1Ȱ݇S[iy ًy;}Q1usPe'RO(JP9?#tW~yiC !1L g(IKU92E]Ţf̰(bTp&G_L>#N,p @& &2S(VziR##יUef5mjmFU]yi"6'Gql{ *4pl=LeHe4tȡa&LߑC8o)"H'^˅p:~ Լ 8Qt2PfxYS;2EOkkp{`"x]QU\DP/|=H(`L?LYW>FQ>RxiۿE8;y&<<ܪ/4>$J,k H 9}nsl7}Ÿ;yEcqa+1 lu:䇕,":U&<w1!xpgi~C򪈪6Cf͈? #,2,kUQ] 7a$EͻofW\ O<Чf^i鹪mfuL>$Z"r¾0A = Dmdv>(H {F;?a5XvxA B۟_$mśb^805aKvsu `H \BE! 8J7ٶx!5y6 O& X@, [+ B#al`5grm:@bjQs?|5p10ڎ.ŧ&7!6ƅIv\| 0'm_vOoA6Hq \__ϻ9<2̍)bѣןj= nک2X+!gwAR"GSQdwM=='CL~xឯ>S|cyV[_otXם(~ kWo+z<v>l'bpD<ɨJ뿼qHJj5_c7ӘW`0h)Hya. <фhI5BR6 F e&gk%ͤͺKʪ[,eGw;2g$9.*VMO-6.hU->ͬk`I3F$6jo9 Ũo xgmM2{.̉p|&%l¿?n$Aʍ"D5TؚSŚn lxrʆ 0IR+1 $@{wۏ~Ky']xG/=P4H0!p-:MkÙYTk=DF OxeΑIZ]/1~oZÜg7;nW f.f&dpxՂ!de+5ALj!5c0A1떰d}t*Oҁ|^wo!H!$X1$`qP hoGoW3h;;)}:yOƊBP:#KAu6+:/{7CktJv05k B" o̭k4tZL@H˞EmOy's巋T+ Nn0fD`||䚦qqJN80e kJSt'ՙnSW0;G*0ncZ!P"ghжFM NtHLKTMlWW\%Ee1;C?p492nz''26f˒u6NMCEeӷRīBB ^[qF8Ad !FE7X6|Y5 { r3*1U}i:_3 PvkqtIY cYuxfn ~@;1&.F`xYBHB] nckOׅӢNDznj RzPA,A2a ^CD>1Z Shf2o1~G}/n¿wB?h܈U[>LϺʓ˙IL;:=$XWF[01ۼLxt  DVV;;;81Ȯ4'؄r"rkO1BNG$ܹND&B (F5kELbd P7gԞPaF~cprZ1gCp*Ud4|7e{XP1m0 TWALyqz v ϑ®!}h7^|ò)HhF$((ѻwy67;%Hr )ܰcHsϼi,u#5N'PLGwZy&9O36pc P8tHo'EJ 1W@b턦 BI%h@Hfb{VC {xDs64 omhyVB>3+">˛lYQ$!0N C=4#0шq1Ox5,rN]Cz4-$Oޕ)@Rb|P !qF*1VK [9l ĢE0fLCz@p~P|w4 \lHf 砹 # "b\e6ޞ"mO|y*޿.=qa©;sb5s)rgC˧/yŽGyDoBPi !u}$#jqw#9"gtD N@NJ~x_2) 0OC}1p> 97mW=O/!B*!c#p΋Njc9-p ʍNR{J QU,rGwX[r&.H1ITEEU;j0ۓ3 ž"18=:p̒b0(\I-"NI+V&w웧Fchݍ}-;3>G iӾfɁ1Dc@[m2T4ݡ{15fta¹:]C5 1yڃp,c Ldt88@Tv&J~Q^[rb3XzI:4;l)?YS5IxO}w@zwF8ɹg\n ޥ(An\cM?C! @I)dI) 0IPJ^;Uw&lPvx<V^z#mv(:{54rY̼ ͈eihܲOfEBgwkkk!_C" ՌoC oV}wƹ bIs6Y}Bo7BL=DlC'10v/2́qf72p'0lps][I#%Hnw稜'BC]^:Fa4-AhT&gbfT8]V0]0LD)S&Fƃ ].!kt /t[KP:{(<2`5M^]/%aD9ZAƴq53a;=ݺGQ!a<{]ܹ^ƉB'$qoc2͹S 쮽~CYdҷ73 K6rۜɾ9&=J7{{o ʎ^N:;:k*Q3sx/T!/L緄+$84ICa'C3 I",)UfL8! " ("MFlމw/|H(773|U;=uH%`)d0 n]Q= gdSUGT&co ǽޝ4~d (zGLq6H(3 (WR}>%~KҐ\:ߕNAޯF͇ژl+f1U;W U8d㧧u0wV#d !y=!wIO88[ 878z]Y;oD<qy!ʎGQ}<Ӂ1%:]\{1;J؊dSmT~ 5CtB̙v;U&[j}PcZ'0I䝊{>PRu⑕Qt i^Q֏_Cfs!$)uĄ281;yp66"+ #鯖>b?'+J .˴d2Q#M[$PDѩfq+shm3JɊ(t&[vZ=5 [M@XkDp4-z sQ{IcK$g[/m0\32wTSxwy0 C5QP6˞vE4VQT^b <(} ^'^§9]4x"ș' S`xu%)o%c}3_:u*J bShID#[bT0)Ը,Zq!&s TbxzJHڧ;kAf V!P 0,v!E(3 ,aj$mA#H| #=1C;$+|Ϥ'OL&1Hh/BPuMN+~p: "LR# vALy; <QBS){cxêLjSP~Qm`݋l;JEodۉe9fmH&rC 23d4a xnX:(F'7V*:@f1sdxBm.-xfVB$12,H;-Ն5B8I$`&򩤘==hXp{MCsa^X*$m&-#lqBFz2y"8ß=Ǹ)gPNF>Cu=~ iD$Xf>~NeX/dmI*16өL u]`B$I՛Dc[ qʚ"x4l/ѵ-=lON44vZ[nqGIgvO&ְIW=lr5f@vCg-l91 |E;@v=AI'we;i׭ ]i cz PTQ!/κ}/#4B]A,In:{vxO3/DSXDs4w`$$N-Q|S9yt)L! 
5]27;[ `n{io j$ޫO#!|p cGTbop*#u!%Q(;O]9`O6LԮ5'@;PZ!gv'qe@ 3-Ҟq6`)|V*--JJ rqa(~Vu6=)>I$ZCBMŻq{ڧ-6CO&`g C0(ˆpeWX:{fxDOH%r-2ʆ}KaN3;o4Н'm1|xOSSRzQٙ ʘ6+bZ .8]cz=GS/;xNU@ҋ}IL[5;8 }m EULz/N^r_TbΤ=S2I׉X&b<#]ƶbmA #: ~u 7!9A3DM+O(XLV/ǡOX 6 x)~H&2v bq" ɘC )4X׹|` %}PX)"DJRHӂӟv!JV)Sŏ,Ŭ-NT󢘺vv.h d {oSCބC4dSa N>z"b ^  $pHMt;CJhʍᘸS+s(N:f82@za1?, ;r&8IY:R Hl DlǶe['"%)Ԝۄ\Rv}a:!B)R *g:ڣm*D #\&RZ1Z˿RXPtD!@QJk13 4RL gv}4&OJ~ ?L D4Ϸ?O*R 3SA8|l)?٠d%p6QFTuM/!T ?kӍ /-05V%2 gpdI.)ӊmy.^J5¦a[scZ^t)4RFhIu yɠ  $AԹ`0CPcB .$']A48!y8_C7ao !G HyG{zr㞦B 1P,EdQCPAa2F40DT> @a/AȝmI`_s5~ӑz7 IrsvA Al' )>6Xt)KC~,@CsV*z^&8/YRw Gs)J;bTX ʘeB=+0{Ӗ?p{R30Lʦ4@a fx PNbbH"Is䀉pP$c#ËPW=}If{ !f0kHS62p-d0b4=() MŏZo (EBE |=) i #y"8csgq԰CZo& ?F0B % M xKNқbIj_-@l&yD`u b`d`E !Zy$ =wHHCDIE % BB"1B ?Yv` =Bg/y߃(l|`=O,ЙK%ĕ䞑ܮHbh)Z8! J%$yjZZWv?^?M0ԁA 4B%$HFû)b1Dgt,:.'}!$10#D>hBh-3B2HZbEG4rG~y <2 !tGlOxp2Y9(fH\R`_~"7z OA_h>miď6LJxv#v6;TSPZrA^ g!~<=}f& 8R|MLRBI pH%\:EDU'`snd0j z,aY)a_ C즉”Y ûm7gs>|= ,8{i#= ~3 4!#ɲ FD0K#'Cy̌&DžD%)W EJNLQ6&J*^p !dw10p25,DAFwIl:XU2&Q}uD}5nPɬ-k )T H1WV5E;rѧ4p|$/vZ)r%C CGKSBF&VIRSX 1ݣNN]S# ᖚ[!&D;8\ jL"G2;0gDys@;qyZӵҨ.z` `0hENT~v,Xݤ/#+88C[a][,wP=Luh9;g4/$<tYܝDZwL7|3`9TI$ClOc2AO`zz`FwEPQ~2`b2#I$F?#oìC`?J|a0<ڦ#Rj)ta!(V]@X =F0>6) /k4G`! 'FT dCr:oCaÆ8P{%v$@ f1i61g 3dIAє>6 Ԉ?_KzPԞM}bGnݔ]U4CTS ޯA" 7k˜UIQv Ov\>$w qUUs޼>>)<@kdxV#~pGև-@O͔L5ߎM8+:("'|PSm6hpvnٽsqzf Ґ!UT孉t`oݲqd}B)ݤ5.İFuM҆"R$Il;aqqShaRS;cB _Ӓ&SR~HZ9Tn,V]ƾlkIa=Qu(Q%44P+ds , Ɯ擷0!WH ǖgx,Xf-I_Wγ3x`t,^ X1nu5M0$qO*okG[b'D4A;|<).NuH칀hOK7O4>Z-+L>O0y}3=N-NPP> ʱE[\XLc2d$t d?SIinzAT|Γ`[px'\:~ \=H(|Ia˷_{|@{S!Bb"b!JP)PX`>=B `|p,h(0((-O/&AI'$KơT<@`_6 l@<;h6 #M)c !,A}"K]/M#i2 SRSF"*")G^T ɋi aN2){7 ͬ\H@pzi^Y{^ur@ v 4dKGPqI?jM[w~o4d ŷuNٻ i!\QÏ@h6MTh+|8jRl=I=hJ; U0ʝҊvv?,KBml}́ fNDD(R#lySs7x[볭tN=:<9!P$(Y(ҘL& i1G[8u3C8g NGkUJAac ,Bm c FS.f(,jJQ rNKiD@($~~]ƸWefd%6$ hp NINlRDDK( a#~&7=AG$"Ä6 =O:r. YV o.ߣͦ~],Mۻ*q$-O8kx/!qHfY)DuYFZR5lJE) tUTVA$Ɓ #51*YAԝ ,Lצ3cD[LgdrEi1-dRS"AFӦ!rʒ"KP` :q,ι.#D҄C͖6J];* cT I(PЁO- yMecA|çNR` sYx'/KN@d >GKX=dKH!JGcxxw &A|N0Gl0Y a$iT5UT 2*Q([ tdX1[` .&Lk!HP.hq6(k@A32E@b$KJ c6ᙃ qVD$&%P Hʌi38!v(&4A  QAXNtZVe]{oh~Pm!HJS@r$g~I P$Sg^gKç+ Rp?(Q96"ș!sed0( v` rzaXsWdeno$ }]0VQ 5nK ",ac8}7& g2FǣNu,+ )W#  e0DXv 2)J',/E6MgE:A24>`9q`9 tA($^1w1&@}a'b;"B;o+!rTtl) '(EbY?!6/w羓pZ%7:8ru:n*PK[lɑg <|K t р̺2\i?2K!ܙY!HDu&#`sE;ǼEItÂx@>:4)~O$yf="Ilq /y5b;q§K@{J3M>؉PD{L8"PP0y;>3C|2PrA^]zGgםNH]9Sq?'!Pi X*S(! i k?BAш'DeaEjO涔V2cnCAh4p耥g_`=9t +<X!%vkЯe-܋'G#T0gQ K&܊jL>Y*W)RCuĞQeO>4X jkB cv̶ƛpa-ͅ?|Yzxqv3^$xh\t"gd"1=~~"Q2i#6 &%}1F0Cq9O|Q5"?$m]i F{(ǞӶʧlAwC ]eJ8(`y Oiٮd]G6!X4B!`^ ! az 8e\:Qn> [~̝`C17HtP#4AT#ӞCMZ]YW`\Q*9jy&ebYAhz ckIlj,$qYG5qm(6!Ă6XKELa\͊IBC8зH'ilr|p{x.>RB  փ\9G 9NT5 P>T=ZYc*["am!U30h.ybCDJ&Ԕ$.:Yr?d ?T.2*OjzxEcPNd^V\4[ydJo'n\lopzoD7 yDd6tTE:eoS;{*ܨUx`LA#UDx+`;v-mE, Nc_q?b;b;z^/xqE0Z P \vnRsngq?86dW~c}󟓆2 R|t}ʇj=j@#Wg96]-*]vC5 ֎!4`L=T}0:w>F lsgOZhz($ .#fnb{BS,W[ PiO | K+9hd8zzJ{0IT0LH`ӟ :Ciyi$YƟtN/fUP@XCb0c%kC7ܞ:>Yu_a&d C C(ZA<$E*@"P@߮  +&4 ӝMzU۾[`dth$I#:8h,&JGbo10JB}$Q7u`&$"Ȏŕ%_,3b="H*iY" B&MU `5^N79.:q/H- >z 1)a'ȥBaàsTDDCN]Q  >Ƙ*e6 KC?m ͏*χ񐇂 ncDG8DML`-p3z_H`(9PuΐKxfAqvbb =`η 1LG[D:Ob^q0=qeCqՎּ܃=f @$)) B%b`BJ@bte (2 I!<b%!CHJJ`/(.GOQ~Jd) #/(3"n1<~E($C&>|CL0;z"<,"l'E_&d9BN"DRavg,! k;4e\H۠!C a75ck Tg! 
æs{vS"I.Ĥ@Q3f1<וT<g]g=GM g?oUV;qyM{::At@VGq 6@^;9!$LLJD-^xBGLlJN~J5y ++ӳs/sͺ3PDZfbߓP =O3Tu5fޣyu(FqU?P9o|E7_\zd1mt㲎a&~S HvRЍBB9+Mkl2F::֤a&ӂF'hj|˔zw-\ht`״oB扒eC0r,AvHT8!c]J5#}<=z!L~`I-l H3 6NDy ,of@Şf޼ )BA\(Z&y -dB'lfh!wߴ.YúawC]hlۍL Fن'27`5vz ]ʖ>!ٖwކyWby=r1`9H=fjr|yGA$^6 X!`D{E5&en"L=tH=;jfioxBmy-ӊ$xwʀ;jGP>r3GnSt78uTC m)1y9كhS~#8r1t+U&7 @p bM(8( BaHǶLbmmσml:_&S=& `Ʊ ]z( axLK3hgRdb , @GP A8A>vvFsA[8C9xol`b0q̝߫U1#"qL I ȯpr;!_(IKvGnNA$E!,R˻&m.Cm^jӺ 4oT 8(O,6CO" Kr  -0'}IEH >Z*ر3.DFõHYBldԄ?jYl,ݾɡ&I6 $9Ba6.1`bg囡yn7 sU('R= H7(͌H^b"/ >jF8ɭOp=6gzQE26i4pO\oF2nDpF%ƋN94phB/DH[FhS!Y3Lӳ%CCn.U?/"vLĢd]s7+a8A9U6>Mێh~k> H@P$W죋ʳq0:%RTuT !O pon)8nu[Y(GԶ߄Z*x\8\C9ZGDy꫌N^*p۫a/\Zh8\EI0B*g> x|xrܳI◍$,eHk%A".9W~8ǟ -)=Q_sWaOHgƬ)pzk(Myz 9eqVlـasa@9gF}iΠTp! LyG}[`IΎ¢;I͊2L)f;;s%yw#!:ucԎ<]PupsO "Lh`\;5pt HB wz +?}wleh^xw{w5 @p lߌa$"x-$Y!";X8'zs@<Ý\pa,fn@6&:bCg6"zclBO>]AG/08ڨ+5݌a lO.7I9b:|"pjemŝO\ɦdL`P Ķ(gl#83:C&w PPv)^C܌)L<׸@0 ]ҝ-na| G{S%Nd0Dd6@tf$rj1Xર܏rl)2W]0u7kc,R-;bY7~MJo5o'*{' XG *5C`yd'bǡ!m0'3SA21%gA:c\tЭC; &X> l(;ou P{!pG{W06a:Pr5T@7V@s"}>J B`DtCH4"R12d& PhYaOK A0B=߿ȗoP0Sٞ$*@!A@) 0!O$p4ǂz#"7u`5䁃#nNI]ćP*1joPmWwǾ!ȺWa p=&mr?u%`R $no!yv0/_ƅ h6GN2>|A+dxNN\Q1"\`0j6=7'<[ ݁³;Ci,QE0]?$@GCۅsWr1qIyguhv9BGȭIт{ubC̐Ȁq= W٦ςP!"$Hbs B+oJ>% Ư; w??8|_ 2*J2pď)@V$i` -,ST  $> xgŰ5+"`9rpH ba% xePOr^HhH6 1VYff$}r!pdt.Cؾ}A g?kLBIAm5ݹƊ)Rih&]=x J4튕=B"5uS`<4@y`fR vwխD́IHG'Cj%4`(&H0i6@9Ev é]?;.g4?q;t'gTa .{Gi*Ok 1t0B>fF{ XFC#QFxد ?t@tۮ4z/;ˮS {.3x=RR2(9ld`#UE449q!;DBA !&J3QlMஆ7 }_Bj"@@d]@]D4zcZ&_A@-K/0`,a/}~\ϓο@1$ $&̑;!yE}J C!(((3Ǎ;BΆ^8<(Н(aNr!$)q M`%@K\p5.SAs(qo* ҍ \Kt!tC@~ߠG>ₑic ݀}?wv4$B#oA )Ü4`u/씄 x36'.INC`φ.: HDcj z?xol *_\ŷf)gbD3X i@'!娻kd8;4vHTilq 1 /{&AD%Q؎y9]4 *NOavil"8)cKAG@IorAs܃V (:bZLl\0Ƀ(ˏ:fwm%s3xŖLtw"qm;jN$wATKjas}W!e'ĪM/snZD63^r'3vy0itN0kxgv^R= #އGωîZ00v{!3O&ÀYa A`Ut߃@;9点a=o׾/:õZww=i^P*#K8;|T|XM7Ĝ8[1ycNðwVNx L1O&)u&%,U7 M`("C){tSeyr+nM,@M 0%{NtNO| @TP4 o$ՕNGALBi@CDzEG59~{z7fT@dʁD?vY>XrT>lJbB@vTQn*{/<^e6:&tE&!90iܗd$Nch#f ypDAcϝf00[" z`>Ҁ"‚Ζ!Q']n:<3.@K@ A|>%S2c48 @ POAA8 L Cy4V?֟4GL$G|N96l'iD qȌl!ec&91$/DF4\y9jQA"5Y杌-QP" ɑ5es₨ r= oi~&vrҵH#p6n}(% o$D馦ѯw!qbNw432On]<dJdrqІ2P8;jePd,Vsf6^N,q;r}I$3>gNC2{ը|QGWV'mG3ދ9p9BCJϏW@GcCS w{UDI#BDWq#=E$spO0J/@ X*2JƈX"hkb6~h&DS{AIb !ĩX0L ?O9 SP8!э"H)i,4@|1jI pUNYg) zhNR+fnq$vMb68>MmIj**$E' !IQ!T H ^cˡ8MgfS0 hYPY2 I* )wd[XNx IHRJA(J)Sbgjkoe #[#X#77;nwN=":TXl2@CHTDTUlt:rnM¢JddH:ciO$\ t"bKraHpD$@%)HE2 E@G@a?VDh10ĜKxu K8ј!Z'eZNZ(RX5 #iD L1*`Ik"2 ㉲ uQR`qETcI9ɁzrO %;T.Nn۸jNZY,I_Ev8)tAn= YaPPڴ-_fR`X_ȜUCyN̪0 *WlZݍ}eݯ+mnYeKa+iQ`+z-%ls%O\˞"ii25Ք.|qǰ9si!}rPdE1&@7>O'ks 3-1`%@b,|;\K@MكdD=0tztt' E೥rl0eZ= DηRJn:ɷ/WE_xۻ);%}pэ!ĵ@Fl~;8, ڡ/'sQr1 ͚? Ō8IB<'RXq!ħ |gç5@*4H' /)CчIY OCY :i+izΣkH>B^LL/=X2Xڴ:dy珴P+PwOL45 mh I^ITtP1CHHoLvRIy` 9 9wLkp":)q>@wy!r~f8`54wkths!+7I}|R9fLL(u(\dw8i'zI1$b;3T0ەϞ7ףЦK%T QՈdM}SCT8E?(=OɆ^U˻޺P%K$'O6R7%#|?!P1Ű ?3C-罫{@:ՙ>x1aftO؟"ؠxp `S RQЂ[7I@8 1dǞp8d2`,)O?[TGdV2aÎİ<=  1Q`5o 'DZH' Lb;Y;>,89(B';J:YO[s<Ä~dq-@gELoh\pC:TuyH1ZP=W{‰ ]OD- (h> \ !zG9P)UlrAlF `6;&H@šh;[{u|YkLcpw &PpL AD4I Ae ( } njYHpvG Wgc QH"$AL4@yCH0/ZؓbP hЧ%Qt0R`SH!P|v@s}M;W></I@ s/A=0'P3V h@!{lLeޏ!pBh Tᘤ:r닎9J6g$ц9x$@@b"">A )4(I&"@厳 D1D0Dpe744)>s@p( `5!E% Nb$"bCaR@0,ۀ Ia dPn"bD"LQbo0G|J (̢jvЎ*7Zh7ӊK+Nsd0Qj29 v9 7H]!oGC ,`^XC%"QN >$Wp9&>B_qp4E4K"ZH B: ϩC)JS#!Wj 6=W+ށ6@Q ;? 
mR,e<(+>9QN_0ĈS"r|ϟb<רiQ+I CeŒbvQ+͐WQAb%- PJQLC2+/ ꏶ_H:d̜A+D;wNF<@6J Q1ERmܲ A7:Gxt?G P#ϮQVg' :TTOJ(K0qd/bF ~ ւcq))q.rTV ]15 @D2&xM88N&a`'itIE b37B 0.E∱ĩM{{Dw1eO ԶmlAW}}t==xW誻Dr?jQʸN 1 GF9`v1=!): DM"أHQ ?($u?@L| O>q`D,dpO h((3 <3g_=1yKxN Qب~wHL7`qIk΄C5$4F)a,:# V!uI_.PMi\@}8i멤 !B"$"I)`""""(`:.zw֐@hLr"k#FesB&/ l9!@3^OK9&ZC!dmGoM;W'_Et8gk0@pfd'jO|$R͒&Q\jd'?\~a "9 NGqIbA uVXͅ^;0[PEjkz9 (GzBP ,4+bA[Pp0mm1sC ;AIBSDJ& M&tt/HD"^aX! Et6bff fi5B(@'aspN! qs0U`qÖ7Cl- N#k+]T WŸ8_UظQdA(D{7;MF=^F'3JE;I0LiMJ+Heh<κU; Bl?U`Z!'GVtgR,6²JOA=ӷ'>CgR,C5 CcP;A"pA"瀩O" O w,7x2~x ! p1H$B!-ɖAFQӳ:?_Mweލ=<Aѩ!TqΔ=7Xr|*Rh{C+'2IA4I46H] юQ!d'vlơ)DhRjDR QJ)CJBBdMpM"WVJ(FH!I%Nh: z3BN ᨧdǚ? ߜBO65T=l+HK"PO؜Hp>L F4FG3)|Uy3`ƺ!$zY?0,r9TZգN^xNh!i17M7C!?ĐAed@OKsDP~w*~@pȅ(s Z}A$={GV{\ Gv/ >?[%HD(PL @Ik]3`CA?r>ݔ!"v Zfu_φ?yab?)kێa{H} Gr"FN䮘RWݯNZIyt_4fv_p9Rc*NظNf~ȇ~_Йj?XtP /QE ! خ|PbH~V [ Η[08Yt@ B#x(h4LM^)N{軫yXD$DD`JGD `?/PTݝ J`(D#CHuH#ަ$yuD B.Y$,?Bѐ?$0=?jD "KO OTy}:kL ^G1#qROHNF_,9݆,[3=/45ӝeZOM ;}s8a1^] wG<x 8wG˘lr z:px5>0xɅMvAe6dZT<'D_d]<.4J)_A}Zc]< gI8>E6Hl}GcŎ D$Yhtnjr D1Zk(u&XI[?2l5a*òLͷW 1g`8K:;yd?L 94$t#]#>s=Bп0SԇfeL/R$h@DUPHZ @OXf9Lx<8q~`cOL'T2(Zґ7;*2 \6Gm0'H&Z t&!bQ e=ئ)ó{` F!ᾓrD> %;}?-at=L#-Y2qJRUBV?#)6$*""""j"]Ԝ,&[RQ 'tѓ&~ͬ ʐa`rCU4,a4THE)`Ϙ4$.2h woϜtI2ަ P8'F6Ihd=( "aH {}5 `<QK0UTp\!9q(p:/ÖۇE=!=$~LY ' g \17yjfjߚ#_ 6 !vdCǝKvq<⠖giEO U;1LKnC fBFEu;t{8`p 1Dɠ1"N } ٧xʱj&Zns]> `m(}4+SDS2j(@J!LX`JDҎZIgBQ LȁL|GyJSTblSPOrޅm"\z6 :S%5@p)D|x; M+C971l t.0| 8nϞ5b Sn Qh!t1k]w`m묾~AaEO90_Z9|۩K#03NL{˚+&oѠ崅j r2Ir/C񟎦t95!2{C8}܏fz(F)IA<}CP:%vO576t@?JCcObLM-H;8gw]$`|D$F0(aX`e"X!X%d%$ %?LAD4!5@,BT12D,E%1LD›B8^A!m]{fLQB]6X&ځGt߼:=ET)TWm 0B1,B R xl#ؔliˇ On 6’ tC!?U& 4 TBH#')P]_a / Lf2wZxbr>?~=)נ)*uOd`)ДeЋ̑8h(aÅ9cxNFY/mf Hh^l%GN'd=%51Ca'An󕲔ʅWNP T MN `%,o#;JM^S9E %$H K0+fmE޾HZsj6I!6d bYLAhNx- 6!Ad7ytZOCf 2w:q!_!sBt2|_ BCa|9<!' 9tV8^<<+ 1C_$,˄' ҐrL`e߁r B)1]( G 3].DFb~aQ(S6o`>%0"O.E׃@ :u̽aG:8l L$--\()SIv4aݾ r2$'KD'$ 0`C$V*_5QQ 4 Xp J2Ϊb&)L 2@X/p%;k&|xjäZh1Oy*#!0V&sHiCޓ'dIN2$T29':RDQWj0JsB?NXv ygH]I6f9I|kh,T`!g)LՕt#ci !z䈘k-"ҡMB:0K!<'{;CND2jDVᖂL -"c0!J q$+J<3ҳX||"MUT*.u ǐsjB `c.6  N'a $? Nl?Lt|I/e~ImãאӞI<0($sppנCRdYplPD! neJ M n< z.D rKZCXMw,1EU, Xy y_^8p`uά9=4xv7<}=yT;"7ZF{!_Ow5*4 yȕ~d7Ǣ߲sּ{@7B" &g_w~]0?*`(( *+w| > J䅰jy";!W 07Z8/evd%"S1$;d vI dDD xi0jd$ 2b`ǧKbA>*L@ŤE7*ggtM [vl:n=w6CIkO{|DM0ULH 1%w"CE)Tϲ:JPlb ˊ!鵤?MW=juHn|*(.8zĂ"c!  q= 4RJA}8M{z}\%cG?Gp4J"(iQA,rzyŸJꙌܭXmŸ0RM J=B4n"QCt* Ar0v{xyq-lمa/4 wù m D)+3*V!%D˓|8,FM@]4x$2}-1~_ac)c5:|p;iC/.|0L'7ޅtg`a1Aq3NQ/ ty; A2FaҲRS%dMw+(23<'pQ#>^\X27(d EPp^9D Jf\uLKb_1 p.!Cu)h2) nbݴN"mm6}3Y)UqUu$UĊP:~a!c8 I@1!ABɂ` h}G@~@~x=}3hV6ޱV?dv=E/Cq) ʇKsұI⍆w0Zހh 8ƥk0{]6 e&c!т$( EP^Q1@65rkG#HmWCM LC5//> \\\. N;34$:?p"( Zk5b#b5MM8'1 kP=Ƴ y'o|^`L]%†bg2@E*"r qh h5htr.f'GTfe@'g=E/cdHEȼOp%*iq`P8aidF*pvC2";#UOg*d'O:v ֗-&/r$-j2COv=6ÙZ~ $ZOD }{u~x(#ʁO<h(^$DŽBF?7gIȅ$7< ;>+R,=D+/w"|jl*>_ξ" Dm:+r!"BSHzP Q3R 4:NmLc5&"1ܧD; fL3A$0}2HJ`a{=Hcg 9"|Oo;J(4'6f 8ARʨ春"0صTGUh0R&\cĚ8eJHU$CR5;Xj^?B_^p1Gyh'{s1]" |70ܴj<{Tq=%QgDz;HLN4BC;` @q$`'pIMHƳx H}3H`Bv6M1ad"|bA*(w|2Qy >1JM|\H5W%$ @! w3abzko tp"!Du Ķ=A>0w~%TpN\G !&ϯ+7[ ) *)vp }Ѱ@LCʜ>G|^$bIH)9 {s"yI|xU年1a bi!, /M6P `"R0fj3D8"#r5#P=OM j KJl/ihv $Y )xo,'?5~734hȖQed(^5@dDi$Z BF^ U 7a1A91'!8%<4X֙[NTe殷-U(XN֮N;C׃0Rk:8Y3*=p/) c䕩N`R:"K, %Ιþyz,f=R  >H\ `u/'FBnGM蜂,Š!Zr 'J!i3^p-\|MQE(1̗tňp8`KSr3]=nS`wdF:0899ѧFK :iœG88%1D'-%f, j8HdXNp$CuEerY<)ՉyNÇ w\d(0`s`#n t2f mmet\9.br.麆*lDؐ?f" <g< }ዔ&fM~'ԼO,> ?ve(]3 U_W寬!e{x02 g٠.ӓ{<@ S i6v\q6x}w &v2^DR+P1D{F0f@G S#p9y. buyC{<Ȉ=nXG0ûg NRWɬS:,|̝ Ň5q<3bgK85YQ&@'"BI>RꎻSX9 B^crr@P#ttP;vC|/T\R6  i[6D$J/˓!N_T<`lrӛ d;*"r)ǢP #B 48Wbx1. K8N*"A(&Lh"{-C)8HI @D45HvBf*e,%Q3)"ćY8jh2:$nd!$L4-2! 
@+0&V灳?D(%\D̓Ĕ=.ϡR]&~ EEEJЮ #M@YHNW嶈D3 Joq؈ζka2O>2ucR,q2u霧GI2J C8"=/,1kIN@v{ /VL5 ?(&]+Q[=pv |ǐ9@*޼BeHf$C2%HDJJi" jY!NIF!B`%aa ` D$()%jBVHy# HjaHJ4t(X9Dz2 `T@bia]LdDD ,mՒI6bE` A`h(Z~D+$-S5 ҉S y'P/8R p=L 8 GSN F( "k6]%RB!@ٟ ]`|`S,"DiB_Sc/ & Rϲ& T!|lB}JtR(&+31][,SŦ4 :A4R"".֎E!J“͠`~r21>XcVʖGpX탽=-H *eB!'N C$TvJ{.PwF> CԪMX4.CXW.J&<"N=iP;lm)VBBQa$2t9,W#Ӊ'9gY<;Kp{@@@8`8zx#}&LyFptῢOGi XpzIaEWb2`%8!ٍIG%Yx4FAohah"C Hh1Heq[@l ,Q-11U㎞0T^n  Ikzc5 fyt}O`o =>W(P `7uW>HY~QK[` r?IxcxRI0`; "LN2ea'Blti(NDsQHq*-* 5H$jS8}d6MFr " NHEMb.yQ^ie; O~?_Ӝ1=h}q<;IzaT?Fؘ .~Φ52sԲD2L[mng8xA-4=pz16v/:X" \QQ"qxjTF |:ǝ ):%mRnCr "o찜<D^SeHV:qk)/#v`m~F``:2Vn$PX $bȚ|83cERҍ;lLG{u=›r+&e/Z:aQ%YtwrҚS>ty^t[7lS6z:qcb1E3pԞU󿻮7ht[udN<eQQ`d#ꚤ_j>oC}0aJ >"Za9x뢜 BpqC :$!LB5@ v mWx5ǻ9a:{<:p~3 &!@=='"]W)4rK&L[VEQ1ljaF x 80$m(/R>!d_2ȀBN3"s!;<O' :l!']gd6L4L(j;.ןAOU/DC< }@KDQAǻȃA߹IWip `E)#!ie9cCrlLpN rjqdxLF`Tb`( @.;=lzGT?!Sg"m N_rgeBfA#'Eʨo46da E)"SsÎ/rjmc&d10[&_ ,c0o=v ÒOs؃D' 0[B(LwNoSY)Y's2VܣB9fz(D5FZFVLTS3JlRz8(H;" 9,Rb5kMl$C{ a@/t2G-p)ЇH ܇Ǒxva)Xkt5'QYg{핛wBusxLU SĬiƆ \/P9iExnvS36-s@. DK x_cd d0wFd5(QCvL܋wEp&6ls!Ka l'`Dth(S֡kq:I[nр2[0\J1r(H+F!=܌Yփ br/'*NʻtWUw87W?L*e;Cإ 2,SN̲Ȅ18b;6cNPȂl aCB=O H1@:L1Dox7Hb.{XS$d5J 1ϴ k-n9," K4ɽhNhvM| [9Qc=N;qhճSDļ c=EhAsCuۀ8SHhOa'廆CüxkU\%m65 e\(f(L[54{˱&UZ5;ǐd݀= ꂚb9XT5ANx!mJ'l O 7\C<>xV"y"ZHBp66hFm?k׃1`sH}RD C!3M΂ 0ܿRR/i|WU%xRP~L$JoC^NJ@uD@@B$S,!;e2`MCCNKS贖49+,P1G딗[@b D Ë!HB)D>4!ufh, xy+]c 7 @H'"ӇŦ #EUYC$k`1KT@L7T:)1qڰTNZk7IFPa+Ba^~͊pXb;FN-p~l9ϰ{nӃaU%38Ax@H'|#'6-!x [X]0˹=!~lE)Zr%'qOwi !Ç$"XszݸbY<F VXj9ƒ!NP.5rC0$Vk9r91b;~\cd)T:ԬjӢcx#'W3!Y^/m`%0i|>1Ry<)19|48ةt58R0ɘt=x礼VအkbkCmLį]۝h611L)P+_QF)(xA SWĜR#ևι,aڕpŘbP8syX#lLP$>-EvMȒI4CEu@0IX𥁉ynhy^P| 9^ӡˎ,ꚜF-B@)h"d"R Fжcƴ .2tn : S̰=ɓpBT%M,t9uRIӌy=BOs3E3BF8cA &94s6GottjLO`Xe a I X76;no  zSI[)*!OQ\EwM zfO?GıP DG[DCau*21E,@FdJZ 2dC4SG0r•:5B&gvq5 @)KvHrGH 1uב)>u~&LQl%;!o#$@y&,УR`t%zs!28g%S`b }Br,J# Pܡp :9bc̊&@ҧym=x>ύy#˜HhH?PD8  BBD`Iˁ aq l@(%'N0iZS738';2wc(1biNRT?-CP \52Eez;n#4A0p!Z=w,!9`))GpDCG'1T 8bIR4C'A6oq;@}(_ C+1߾MxOvE s.(DpDbHY_n$9l*x8t',% H8#V DV3× : H_! ,O1R? tRIhՃX2 F"~!r.k;T;H~!@1`j0P}av]XjU4DGuQ0D#I4 L?8@,+b}8}'zؤ*`, F4l'΍69yR tlUbtv&z.萆hs6`F *S3]  4 .צM3ݭ"xI!&87!ӰZ[ae!Rvq,>]ٜiyƵUG7Iu>8O9<ꘚj1vGIؽj+jP(4L+K \BQ 00RlHX%H>p)$H Ewv$EwkOm2k(=C m ?h@?.?`vA\Aq'P$X"!׵s?;ϓfI$ﳍ1)d"%cMrN8ph gN$kakLjLV* D@!$%0BPPpv; xC D[-k\-ǯ}fn46c>aʟ'on䜡ޔl٧`𔽔e,Nm~wBra êx8 A鉐8ޏXVD91֔gnW윩c;9mns޼fgSqX8qI <_v)TM#Ce8 ݴX=qC)s\1:yTgS(ՁDBӑCy&Pwjt)U"oZD} M/Ti@ŲdڎZe4hk;LF!jA( $X4dޅ Jy|0gSXyGb+qJfP"8P4 _n_*3C𲹟IA|7Ig"Z JQ(;{x]oorxA2$0F@j&JB9J u@XRa/P[+Rj%'FǘA3qO^e΂EJ]CXօ%7Ȅ @PnEF۩a;N ݭ;U\" (R!@8evPY? B7urHJ&C)-Si:=$#"ݏ,knJMpCccm;藣~ VaTH(A;NoYK4Bȿp{D"K- ;820 hB/V〘B|H0FJB 2ۧk?Sܜ0v?H{v})-"~d/CHM0@ X{p[ Z%J1 3mOcA+.q76r8>4 |U)zfESz} Mǃ(9€1tשM!.νP᢮툂H%&C[B,UTkD$V{ZUUUUUE ľwXѯa|aGg \B:r<98 @"rte F1 jmTj~3 "(J>ʫ2R"iC|/b ,g둴N0|*x`~Z?# ׊!CFYe\mY4!QE%N|,쇇ղUh̠b 293-9 ,H#43iPh /5"pvGxA5l4dc̿"dbD$`$L^1>c&xBq9K;KKNM$XQ hۉ`a~f y4IɢQ(=v# } C59xOvDBzAt 434OOCUWD'3Τ~`)$LG=XӒ搫D謗ZRLor@9A$:?V4z H2DSR/TH"D(OrD) $žШcBwQOM0 Vw@( y]R $ C!D$+}{<{p>' A#>$`r4KR5 &$yz~1 `kkYxh76fOD=3?]4%ډ"LI,ZR@CMǹ@wArUO@A D\61q%DO\ږ$;@ ^fAkI!ZЈ)X$r| hb(F\d!\|TC!!::N8?tAi$ }AܨΓ')f}?nwlXQ, QĴZKJ M'#GLA⾧`׎`@8rAA:D@ fTuO~C8!}(x\' 俅:)oz?>˶~, b @ \OH~e TP@am& I;" n;)d"jM+2=eMOF40|?QKG$;NĆe6NӉI1 J@ԥ dԃ!]O-0)! 
#8S4@PĪ<@ BHu *$B|T^~IhhH1S(ɱe wGA{G+Ti5`kXK#i~dktwA[CW2SDNUy|=5@<͕_|8E(B#,B$C CIH` T)|s#"AXĮ$ 8m͒e" W7 !iadRBSj000BL7J:N0xq$ Ke9,H# g ь&%`DB%a#*X@]; 3ch t޹;S}44}RnX}(CTP2 BL6C@AA")ǹ]ҧq0]?Ow9.t_8rka!D_ۋd!3rށF'UUUUmqg/b2t+cy9Fj&2~S!Xe58&nXV Ôc`1~|x9j-CcИ #<&hַA,%ɐ 84'`㓭vty$_ "d31O3u˖N0iHiî$CP(",^Ҵ@ E5,JX EpZt`a:LyU#>OWȐJ:aN+fe>~M9wm18 [$9zk,) :txd$ZշV9XaA&Kԍ*1>Z=vfH Sl#Ki`y 3(j=XMRI\gɄǟSK̨K{u$5{W)#5u CdcWOoRC#>D QAHbjd#I M"ƿ5p]@l珝$E”Di,8_uk44[;Yư1ݡ9%sf\{ a16H,隊 r8z$tSOc"_!p׿: YGs(]4ØcCFMIw ,j!Gc9f|5&&@Mm%q &8$r7?D+ck*/?G76^$l?7|0qߩχ.:! C=+Z'XxC~lwSC'KDw@ф<wmn>jE.;|zvGXk$Ji:j \Woܾh [(!2R[Yɳ\M)+N1OR׆Cg@g10y0 ZHo..qK' ͪi*V^.{9zC,P9MX5bCxh?sgR/[)pTw`ŝ9WFxBh;^I6EDdG_;ܨ6X/&򨡹!G*[ Hb'4h/NMyO^S~^hW9vwD,YvRKt]:A&=؝|[w3TJ)b!3$gk pa9`N~#G>t-H?&MIɹ5CotKHU/\` ojD8tCz iiBA[n,߇o0v5ynBXHh=[Q zMDvQFAIj!/#OQXd&L̈́&Z'-'m49L{sw և:ܵ;)&ޚ$@XubE4&ykh55|rɥ/L})eY(E& u5hHqjvvjL:f 玄<오' ﳱޏ+-:ywxLrNodcF$BNb_v;\@hJX%iS`o3wl?pm=T4mƠAh3|-Qa: BxԾ1龸!,{_p#3 >(sq2<ćoOfaƍ(Q9*}+omi" lLcB߻qo}^]WnZ!?Q2]=ʙbu2j>bf<-kU{l,{"8d@㳻D;tRwxu; Vdˆ$)(2qh]@3;iHv;][NS)> ÉӲN'T-OkUݽhyb*joN%@Uk:\4'vmL̥όj2d4M^3Y<+8Ug.?˒*q۸j7Boi>.6 LL&0b"en_H;+FNޢ@06Cd*i[] O^o]<$N=u]f|Ѿgcw|^)q⎷ߣuey5/ /nݧeG`݌q?}sm7>Z}hu2fY Cü'xN3ym*P[nᤷL[VBc `: CqE@6rD=LQ뵋F J0Hudۘvd| UG1ڡ_?nx<쪭jj,5S,)불N/WF?\9LCq5\]z^3;ùOˡ2-*-˪;Dݦ!ІԺ=re-I)M.ڙ%i^%t]F3d% ODiDT6ܹ qBu+`ט4C\,A/n=K(Vqr?"i,WNT STSJM!߃0G˺ɍF!3`L,w,8 1 !tH6 G[L{A$[j>Af#|k7. .0 yt ^SPWf;t݅蚏CWSUf.h-k ٸhn,靉<1یr[[[HK dlvtd9R<BGB%|MI"pM]$)S@HhExХ(+etz/@p8Q;3`0}T4;ԁ0K㨋0>A @ `l}֡V cFm {lőf@aÀݳ} HN8EP8w/Dt%v02 5I:/>caMxp@ht`52/fQuuŴ 6]FΪ BɤHª7 49"*8 PRKBsX^i`{dE"Pd–xl93w"FD0Tt6sbXp4L\n^|uc</aL9 '<u^M`ȰC!c<3-" "Hf!؆(1MB` %jc\iC a) "KbT&0<. fж@|ysG:lDD0Lw_WLć>!Prج@L@)sTGCQ{]]“ "be~&y0EsP)[(?18x)pQan5ШVΠX urr\~\rWQ4!xcG0FnD,Pqdkɛ P(3twvz``[ՅCäZ.!a9 È|Ik d$YUve!G}ڸ̖S ! 7T9.IBptB\,94%-$(XBNx' Ld8[(lE1r= D`K3l$?7@n_!ݏx>fCl|Ol{,9 a8&pvdO5)1D()'Gm<ӌ**Xned T 2|~)7"5{!7iڀ=dU{[}>slq P(d9>' 5ZQFN*8qDz>yIk"& PJ$@(瀳ܫS6(CjѯyP1iJo"AEtcCf)i>Ӯ :a4` جʧ16"<~_:OGi;\l͠3qEdpK&]wN+TeD P Q=S`>u4dOY`V k"r~^qRaS2<2? vۘ$hsNJ+lF\tLR02Qv.,Py; =YY}}xZB%_XkcFb@t="{W1 \IUBRR&!1$M Hh#A9p|#zs͞cd3 ;YM!݃kÃ%rJ&EY`8bOLVH) HzaPɾ2.( ]m/ wۗ߇Nպd{11"uLkM-aU֫/9=?p{ķq,Lb>ym0{:Z"1|`73}:ȇ H<)S}sO ve]LA(=J"ю?(>؞8 >))N HpJ  } ׏0=NnRR6%J~S&VL_@ C&ůh;0$bL^x|A|o*$@f]D{ ~9p,02>Y=D?ICz +`;j di!3؇I_laLQ&yCVP(P+Btr4Ў#.Ń}HS3aGP# Z11P`:5\Bu 7b` i+Q0?$R$,%H9PCuRL3 vP·(ȉ'dؑK:`7]N ji eprykDT,1ƥru'[`xq.9-!؆b9ơ( [kA$8R6L[gt9y5Cԋ+ukF~n ͦ% iY`nkq\≎ullȞ)4brɠ*Ni3LB*) K4%`WJ9 >P\{x oXZ;0!J>-J6!Z^R%AIJSSzY⬊C "*X,PDE.\);J'8󘒎; 8}Xz$p#e#&_dwǜyM4%4AGk'7@&` 1$򃋉(W+;؅3gOXSĘ8<'NsDD+Ic5ĚC0CFC%R(\m,8iPQ)J#J UVT*A-J#l`,NPmp㫑Z Y" mC#Ed--EE2;5Zh5.ؙSm&k$łI`v%0DU*çmUV d<0#!'x$kUKj%ڢІ}(usx~䢤!WRb<z {…dRdDa̯(9}ʭË@+';h NM=Ntu^5%KH# Q6 cԑ-!9(ZD(TB&"] ڝw!'W$Zҵd9R٧i%,9SS#,P;h'X#|{zȿCV:spɓ!)j TTB uV_UFh'd"_u5Éx2)MMdŦ՝+'Yt&OP?N󨦄rHx"v滖! 
ZR*጗08s[VքHL71D&ZU* *<.z^waCB,j ا"sNSCu3 /L^F9%;bێxX9^ܜlI !f'xdh:r _ ޅ265jNjT@萜i<--WFy@3َ `@=Tm 钮Cl8m6*U\ bgWx3(#T_$5 '4;Fk$L"̛m@4&bn(r[` q8i%3` m;d-p(sEp$٭vordhaO5(g.?UMAl+=}ba!>R/a:}$͋_S] D BUh8mp` dȦJ FMB8fA%gq*@@+N# : nj塹2 JYqb9X 6a&(n-u$,LSo_=naQgP+c+ØmYi[G*AѷbpIYXl; 3puo@~cM+ՃH4:(&"@$rpGıSV9\iM (MD(d雔sѥAyBkvtphg„d*YЛrC fCCT`8޼6qdoǐqlDbذH!p®:zNȼgy;~p7l4hn"OL !Kl 6/sfSOېC@ 7C&hvpN{6$>X _O>=L*7<9D M++dC0؄sɫh仙 Ǟwn\[iF*YX`t&#P{,c$Ȓ[p[3 2c .%Jw$& )Llx8=k8*!lhhn NpR˷+ܤH&[8辆}B4d6o9׆'XfTգѡKh05?v&&s0 4ӹ.42 fdN;Yx;0o͍g&^ܶNWJٗ,+L"0aA);*M 5 i(BАVNgry3h-b=ۺҝxj"!;(C=l R\q4ɎuBlLRC#fv`#go.YL7NQ'0)ci;RCߑYOYOzAh%3+۴96YӶ[j qyZ g0sRIVpE`Ƥ11g8dtoDй q Q9Lsum&^c0 : Qg:<> Mg` !4ԏ|v ]1?=.:6&3e l|E2 c712퐘N>  p9Lq"r,K gΛĎƟͱ2!!-L w-0 ɀӝ6:[5O\{P4gܪ]b[50 bSDU {EtIJSnݚ|&9eCC'@ڃgmj ex kŎܔ&fBbȤ"En;Nz|@JvqH8#(EJ辇cтΊ g#Wcx}Ti;hBR Hhqu*59[L eI"JRF] $ $s2KӓQĴ 4&*}1HcO'RgC/}#scv_Ճ&zfJ1*4 ̽aUJA=d9gvNɸϻ#9fdHPuކ8y(>[ML,AwՌL 1asՎWhgtFv{t뻵pȁ(ڂ[Iݓu9 SkI1Kd+3M2?@$EDN0`J cppP Ꙣ'! 0ږ%s!6qcK sዽRteivQKWeݶ-|ɮV㭇m:IP&&&uyyN^ ejR 8;'2g*idU .:~UM*уxDHI אwP "M(}hpqBL= yⱰ"iȁK^?-A獎qؐ}LC:׏ Kqo6triEV1rUuuw66wFKgV~tOgݟ4&:7qZiO>H:W{ұ{6/Ys/Mַ>v8,NiШh̷L8i7rd=EO(J2OiI\buRAd&`74r5tydavy&4FҌ^=GNμX;GpdKe%YZ1;i8uc0$"Hqh@(sCM3&NvB(H k>9,06GFX ㋆G 2]E$Lwn&~ ?>jOpiB-24 ЭZO_=C<Ǫ\40X$" Qk`K0K0 CFQ1B- d9e4 7H(RZA 6Dn+c=L@ Al 7M`qe2IÖF;Nl5(8aR`)[@dw ˿;8 *IGv1`D$)C|$`,jlfF` Q;0N]-@# 7ٱ4c.#AW4&EeI needH:}{ȥ0 #>LBȕ/'w#dڡ5!ƞ[PDHljF Dh 8SYH07@\6t@0$^@ 8#}@N$8$<=fXdIӔ[ B$Ll'䒏˸=p<,0򏖍ڮ.D\#HI=7|;U]_L@y?d8IqDKA>ZM$k5,NN`TA>~b'#JrAןrT|*CAAa) 1h(a Ŋ*d\CEi@hP_5"D}İ^5*TC(yʊrM} PcY \,`h!zL!otxxZ]!#eף(@H)$|Cڞ;' S(`_PQ #{PJ^z9.RpHp5$0e@^ IK&DI0^v dSC2I^'Bd2\tV" ^P,\%%PŗJHxIP̗x8bQ!LC$q!AUzN Gɸr&#"놜8"#2PtS};1i#GJPllq~M7FcK#6ᆧ.ar a%w4 % :, vFY!AxC@dtƐ. 7@Ёf5tQnA@2&iMg-,~TTauDMd3# SE0'/H6 R8J^8s6x W<9 L 12.@:a@=DNǂt8!5JQxENLْ<%l=Smc!H;&zr;Q 0V^ 8 @wUGUpHtӀ 6 c Kx]B(zx%Bi|)eMH:!z PmSFposZpDWOR'zv$7;GC00(g0&&>=tS ޟ{|:/vi %Pb4GQf!x!D3+߄D5 W DL)uB&Q׷;~ ({{QP0MOϿβ‚w:WH&d!@!`u@i"Gv0BGY$e d'V#Yс!!]JMSL֟Lp.,W=_jxnR)Єn)DRICsVr22[xtM:19'= Ӛ"-J'> pܜ㕒,cήtNBd&z*Y2sYo GaNʃpU!ނL9"pvǿ}O!ĈHpwBXQ3z0􆧢ngzgs&%=QAaK`,ʼ'ag ezzTF)+"4+Gho]>i`҉G#- L6t !߷x X Z46:THH&=x%pp>u:(pk!:ۧn{LD'WR07HS0:O +HJj"u?bHV e¦ߎf x՞_ I@C8x+{0ǃנ)HdhA!DHk.!M΃k$$ԁn#Ib/k9 "aA xL"@: A(g_Ooq:.M#@ɘ`דO>Xm!2L!cPNaeQFaDe;n[Bg-&(wsT n"&pHL=9n)l`5%jw-_L9dq7l!acwq`%,RBv5bDu)m~zSĕv2E.>X& wGQ$=}9CP8`A9~|~odº*![.`=dD簜Oء=Eyê>ObS,}$]8bA iъa@V 2 MӁ&h/0 T 酸nš>qG9M˸ 1#i*Ɇ,1%IQ~DO< Dnnű ıKf:gt'a,vX_ b|~uCN|Qxa+"Q T GD$GBa&(h)d\HY (Jpwv B= ݝAD|ʯG,'l{^ ,,%>"y3`bs@qYP:0p][?P2р ;HOsa¥,f &^/D@'L|e*$JqvѥBP?C&T:.Řo'>=$Q @ y@ڇ>&: {PtC ⌱(c^2LsqzaHo5s>P$JPB} J4KS34Q$#BLbP$?{!$Nc𸀥1`lep)f)Y @b`e{}Fi9 !\ܨQB lojuh{L' "I6EsO(~p~Ӹt${u㱔~j? w_REQ! P鎞0Ip،KmOpAR]h(jKVejXD]mԥl:hZ؂EF@n,T=" (jTQ Hc,&u`\iB:"y(r.4?g_aM$PU81DPPF d&FF7R^s!T7dA/̡ ~^Aj-q D?v)ˆB9kc0rL)j dAOϊM=ɫ|}O?0"PbNJv]g7v"jGN6* 8u0mӰ'g A4˜?9bFrPX2hT);mB2mSڒ.s*i>~†!OWKQ0'BdSC_!;È2!crD5&H#R=o7{y+1wi6PC2MɎm=+(hI.]awNrIwמu! ~1 YA[?A\_N `Cj¥r`I(Ug' =C*`y((uD Rrߑ$"i'#&1_xE( ]EDԢማb҉C0dJ-4J2[ E DHSYp$J41 h@.3l1.H )"$ y?TR¦=g Ԣ8JvA:ݍmM)uwp;i A xJ>=gڡ( i!!,b$XG1 xPFbQ!N$69 tTv4Hx\IzU2!Cmb9,rt\;JqEU~|Áq|^GnvmϊT0*II8OvNA` haxgcIFWAv:"įd!ގzlFA p^ XCґ˯cmޅ1֚- 00x/SUWۆzApzPQ0PV0-tjtZ9 3%k(I@D[mVTp)mw6)(AHBteUClisQU袄%J$F9EU ReQ'ipB˄^ع< iBV!М նhfb[Þ9)?I J2Gp]i`[x]a,CH'yy.r1+M $ 'f|᝱; 9( CTnjѥ6UT"}[ߦG6GSHX"jdK6 5Bb n# 3C&@p-A|s!=og$A0'd>7~, `a#gϰw:šrJD~og0e]v?5DDAAB6@;㋞!Ah } O`PN^Zw7ŒQB n#?՛f*M!% fYm)&U0`cOwX,A:!آMT^0]'\BFqE1)u+pe !ޣ<yвArx΃b@vC8cb[V҆iY; ?2*$.f@34$O8jP/ަ,n`&vHrsBQyjOsG,֥~{q0@&)xfm󤩀,%!⚋ 7!᭟n_Znv?"'YX| 'aH6iU?u6•ۼɜ_n򜑝BB1ŧ<3ӺV;!#ok*:E-bz4c% mV"yq<<'GdOˏsCMHꪠw*6ǹC Gez y$*H! 
ihêX,պ3{\O4_:al?q8`嫟vWss-.ӡ$D[K S˙\wIЗjM0ٺcbp%Ikoiȝwc_C,yԔ`lPO!$2CFyfR-S;Lf5J2!WvU]( !Zm H(KAנUF5 S(L7_ލpi+&o}FH(B*$(>30T AXDw;=ӧk(7ʹBBT9lo(/ xآHsQ{p)Bnѕc.u4c(VHKr\=0QPaKtH5b[ Uu)36frd"E\(&:f3E|4g!D܊`7:`D`=5yֱf&))QCAތ=rq@0M78~5=lx_0p/=[ @u(%, MqVq g9׍H8e!fjbhe@`М2ǡgiyI;&[)DH>,>%$C@sMiSuƈƛi&ꋸaloi:zaIC/v O7d%\~}Z/{=G:^)W}HG߱̿Mr0hܵfRl_A}_R[/hiAڛh Lk 3:M;(g7=.EɝU/>Qm tʭ%p.DXiO8E#eF pu")x+Bx ~; 7}X{]0+{Ѫ8CӖ W}br[׽iRMa%!Vi"`4kAIY$MVlFfL4yNK@3%"OX(~瓔LA: _-c_X|s=Tj <')nFN㪩1a.=Q!E]nGC)7-F3Ab(w4pq>INoIbF"rAFT<V[R;ˆ X!Ta@A;_/j W `v d$B|kPvT_ B m!W u;tBa1 y5L'*ϣ*+V{J_Z~8YȰ12!!\v~o{9G ~W1Bcj31]У$zzVjoD~ ( )F`xtv8Xe_@?#<{<D*A*--!#1C#oK0!_B~Krrd BI`^3d5r:P焰Diۀ=:yҽ=GcT}o$:q!aP&)D{sKhj$.7Lp4LvJ&v\&Qox"SO{wuK62mbX//<ʂX;HxDly.2)pfrqNާa_y먈w`?ClsVkAI8*"V{9$O,0'R)PK~#xc$^&l9jH{c9gD=tng<SWLwTc( uGN*@8A>E)"6,Aʪg 9\.iդ|ꨙH"ӉT&{0 pH=zK3ń+~8)U$hd[8 K|>],up};TDfX-*g4ZPGD'c21'QlKEAQ$-55 u75߼4zʳޘ*XWPm)rM)!lьf! ڀFD.-Uɯy⃷&|ow}Nb0T# ;`0d룰\  6*#מFoR_>yT)WE8]-#M$غim.ʥ]5]?MFnxh$)l7t+֥>u>q7K/ּ̻ղwO{3ڗ6k<==\Ibe )$r{B"I1YބKt )ZtK~y'6F'g ٣o\ۮ}vځ$ A@%mptQVi3>##] /N"$atF/H<1eaD 3iP."&lxvYDAЅeFC%2=Ca}:R[a;k:dlls8gX'\7jO?Gzük;fZU\fj'|!Z(ׂ Eg@wӖ3g>=nCKH(1EǴɖuB WFI@'vN}`P-h܂Vv1 # L2D%5HYqH-{e;alN86!9 dߎ]2 O&B"PGA:TSAʹ[TF"LU raiZ>M,3eHqX8X hDQ@ɬZZ)-y;=<^0f؈4tx@Ck;aU(y'O7C|ׇ q]P2F6wp}t{B/ďOڒ)xz.B(8M^ըe¢#H7>x{>'GqAlwS|vgqdUI*16==\dOֵA|MfU5glC>mK^jK8wm>yv(ǍUОWDA FBdm#q,my1+=U\|QNApC~Wf.4_4/]g`/i%  ; ]"1'<0헲{xʳɾ󮛱bm4 +4W!Y;( %fN*,|]/ 9rB /Үfדּ)l0lTE袏~Ad"aU1PEHq5UL4U:ku櫭Xx5k lE#G 徭U;΍yDVABe8!O=xuAj3%UvҶt5$ߵLY͇ԓ+N 1 U @b`0)pX\eBSjo{?| f -xT % P`ёrsYFQ0-z>ɛqR=iLL8"`lo% *cLqqR7"<Av9~W<)? x~O1s>L4r֊ HTb"F&  "[u'c:N`v;΢`2vT@*`ThhvU; @ @Bq ,v.Av6C;:pe* AII:Iqxs*0C3N|f 4M0 !RsP|my`-Xp++]8.RvPB(:K'XavɓbmSC`NVBvS}Tm?'i%q7Cʼy&wJ#!9 "AD)ucQD 0TIS+$hG ~%@ >s3gq0y|;IB{Y IQ1mGu g-s"NMG'0/5~|sI,MI[?>a+}?#IDBDقMwΌNvSs'v!ݎ`@BMT$`+8ˀ@ p][I0숦bvp]}$Т v{hf1Dvd,@Hr 2dYLD35ٻnF,tl+38Si~E$C5PI@z5P%?DH(!lB/&?9QIdBh bT(RΊb"*$ˡJDJ@H9(JuZ%"i i(RPp2P*?vp1|S >`}ы/ҧ??#[N9Z)̀dۯr9! =<'zhC'u W ;> p lvć"0cLq̒Lp(}#ݹDM'ĥ^{NBImANAU:w} G`SU~A<\S]qBLXc"A?WJP㣁cY+{ubPҕb`heEҍQmb Dpyf:㴐~x0@H' V b?PoG&#k]3}ʮoM8fQxȡ#~~~װƦ&(;M7݂ޘ8 VL}C#F |;~3~hgYm`a5$9ɶ&l=|s]ȸ2>ڛ|PhTp<!yFZH_ ՜sɪ3w1L4eUY"~#qDhK Q[*i8h\0AΨNeQ9:Q?F1 S8@CHbMqQGC0g"NiDl~cpZ*[i pK=Nc4~XC8Wzr }5z!!?s%Kռtkd.aELz@Γuyˑ'+V!{{˿DU/qAݼрȣ&!0Ceܽh*r J#]s M;|49=* 1':#L[ژe1kۀʁIGbQHL=߱w裄{ _~sf`c4B&3UNպT4+aD>ԋ;iZ 144!/qCAʊA䘰Bz=C^ vMt P1D4^y@{Y֐,`le$@0Q `5Ә $h -H:|tLr30*V[ ÞJ*C1/l/:)ߴ8It)qXxqA5PR4?ϭ M`kI+ӷ1d}-yßgGĉJK| %" ń1ryT ~|B)fE #pѤ5Z0g8'ulqy3#'3nwI!<Jr 5 e#kD!RziQ搹sE1QRœJ /)$ѓO/TA1gIƄ'heO2@NgG)HkdЛ9w@+u;H'U`mdj`%CZ,FX}"U c`[[ D\K46ZYt3HՏKrgS1# (<>_x0eI C=L䛞g$9rv9/$ ,fE<=3pS/6qiƔ FF%j+hYfR H~imiU(6sUV$ntLCÕ^T.&w?[O6H F# Tb]&QM!}) ^M87ÄX̑!jyf=u()? 
\0T?}=MVƚKM1RAA@yk0vppHy I=$Ԩ+*1 ߨ.?/)1PQѻ#t<먀oU ?Lz 8?f\.a؄v8i#HPyJ"e&JIAV{m` C "p-s4r *Aiv{`'BfvG5kZ"“Y,mEon۫6-,U-iTű`Zݟt|;FariY-Pѝk"yki7TWƝ?^gtRjH PRa`3ͤl)`@ 'QF9qu:|`{NeG:m %w krLc/\0 6.):ciHb`QTF%]B]mY7gGot׶Rmc;LZ߬7^!(t' q̯͘w[{ rG$IWſ"#(羌O1g͜-{ZYuAԒI 53506o(x%>a{l÷^=αrA=ҩQauGdzaBthҮ-ź3xYQw2B@)d*\QKHߌ{\,wY{¢2mhSKFl%wDyc4/R!UD#T!eֱv=2=d\ۻTu5AEV:XJ.xӸS L `( uQDF cFGuɈ߲qh@ HTt1MQdE`sD~ujRT⡫i ʆAt:"'!MhfF$4P&?^Hmk$9(ʫ5;5ﺿw*봫2{5u־S{, <9vnv2bi[IEr-^!DEpH 0䁦7:0rМ 8/@,2R$S%D,a3#J&kyCɡ5(e2&Ҏ8tK١5Tf &J!4Ll$&;kHi )w9peP^8ul'q%q q8l 4`XQ,( N jC(D5DJ, ǡbBy;'Tdi d8/f&>*gw$P69=+;wN% 8XS0D !񃿏}Ĝ*U@TR xOkscU]:ƍ?Esjߞ*sl"F< :}> :R&?u/5ǿYJw`jnaFbdK.UMU`pi?$!&RR#L HN\:"cE6K PDC "rlkLBJޘRj; UL˳ܲ$YvP#2+m/6CdeiD/Sr zӽ}nzyT=0˸xGI` %.* ѥ^aQ+Pk88[NXZiFͱ;XE"#)5&yy-zEYP 4-Q]wHX5<(&-1[;0SRE@mx.9c-oWoA8c%Z),%J@fs #HnfT[jn{m%R_oQɦ3\.x|W7F)|# <&iw(΄banE{Y>>waqv+@) !wE =Asty}+Z״qi ŦQYR@%0E6j FP4JcP4Oh@%"w!|>RNK}/?sO!kXKȝ&0N땭MILΝ]a)/PNj؜|k' 0(C'p;@%29{%ŕ5F h(LrRBm"[P7`4b*F3Zu}[\'=;8C`0|1z\)Pq#LU(4W~RP$THk2qqGtssFr6##fݠH 1$LY)@4@ M Nxkm(L1 яq܌'A#Ԧס# w3K! ?8g>\H3 ?h>B̀v >4=}cdň~¥C ĊE"-TMuyP; DCP %QP4DA*#!LР@K4C 6q#v'?/@#X8&I8A$H<u^ N\ĭK r[Vg+vyrC\k(+gYhZu02kZs8)YA'$lقPNE`KX'O臶'ށCH< @&$@>| F9Ds*`ž,CKAI`jûo(ϑBS#* /'?}&@C}X, >*"v> =,O黉gl'Q%R a"@a  Z P)TZ0l\_=$)!g-?Q8 Z$&@t`>Ⱦ\k*-(/ôpdҨ'{"B" ۋ;˴ 넌ĭfWPE?;0u[lp8ҍ`zQzfϾ6!拿5Uݘ1$*iFPyJM0ˆ!AZ@:|N䈊NHi-A< lEpB7qߵps8$y(yݡ쀆~ǐt$2_fS( Kp䖚IG9 c;㋲pv""!Zl0\A 6qz}vd~3rw Q ?0"R`a4zZζU Cd m' iu{p gQ{F{e &hCcO{*t1Eu3)N暧Iކp5 u ,r,#,H:`"IDR0:ܪ tr(PbybXJdƗA[U'|Nc69faHH2 Rl ͖p9*j0J];1p4;0cd([Li5Ֆk!V,;5929*bm -~R\Wr_Gl*}ӎ/Փ:>8("GSY4@D;9ʿOW:anELsE"xbG`'5rJZD2H]0ZjDs¾Pa>Ҭ=f3T@ejmQ`zka%VjK(h! C-xf W芝kG!;[HĩƓcQʆZP,NUS.<-S^Xh߱ C0`D%O  _ h@8Gb8A2ecbkL|Pk#IJ CyFGk=1HME22% {ڰAѼ!+:CaħjVMP>hX#d`dMA=/au@wsALN2+2߼jzCC05>L:6 1_ W`ɶG6D4UҟqP߭2 Jl6eN{`R0'辉%Wa$Iqxe|1yᛢOW4޲0 c37&4fF]o04!AteO8S)>.l߀") _M!Z4Єij9۽=vؕ-!I@:CK,zC" ^iGQ)A9ʁDS T(@PS:jz23F0 pRS 4u"HEv}A  ܡp>M[`/#F0z10oUULXPR5%SH1@=] fS0!'PE0#\џ́ƭnr uhaYR 4AQ  #)0&ixv2 8pR>*/a!%RDB Bΐ$'ei\"8`"Z1!1-`(D8pB`!c$ýcp} S?_ov+(<ٌauD$m3l4&%>zߎҸ1D4DԡYi>n_U%[ Tt vꪇQM)\J~.x ީ F(pthz#}%DQ (o7.IOi O :H@o=5 PP&/_c|4h/`S\_*"O;vxa vSu)h}%yQ5 <\p*Bp<_`žvZ8{nł֒&ah(yIx98[)v8P(\Pn8S{:4N"6TH?+sd|@\OtJSr0Y0d$7q;# >>#}"2#qACʏZd)-r c,zo.l67[u}gg^rn30Dl[sڂJ*65hŐr. `2@6ra`NPFa,"yՐa"8&PA4% DPE09$ sC#O`S3q2* g'ڜnBvgL) Dh!?i1 bh'6A]è|>^wҧP)Df2XѠ ` L*IED4DBH&-&4DH1E CKN$4!iB! h 4J4@ HM TKBP5 %$H4-Q"I(%F 8PS[^Ȟ%'hUTFI2 v\4SZnߎCQEdx`v1߮ÇI2?`l}NL  Ȁ `i@Cڞ!듎A L{"mBKƈ (o{E43P?GAHs ,!(oBЎǼvb@d((Kvp=NOސorѭb09I:}0bAu}ހ:8|ʘ@"J))bfbHaBjmX"=?A˿neDX|yǟ%CKdٝ#0Yx FѓţS P &\8% &

"3H.µ9Ϫqu0K*utD$z#C6o :,N}~mi?l@wMO?wAIK>RTyOvͶlY)ׇ^ AC?;ֹ똍 U`%~C!aSi~_yCI×~`${jY$͓JDP'Z!S{|LkZZfUNO>:pxSuTU ܨkȌzjqQ J`كB h HHV̹EC8s ]y@raYHiSs!ƹ2V#- I@8n`Z 1*3P"akk'P^HTR +dw0|}~1MiR$͑D@6 1"[=6G fS4Pr!&LB=Ljpr 0rCHSMJ|ҽH:X9;7.b]`vb>7@sɠwLm|(K{;CW|'${0y=Ja >E{}95,k&6O/IUsmz`QJփG㫑hwdzv;ZaJ2̇Pt1E* Lzh <$7wё;E۸OnuJH/4NJPLH !@R ??ˮٰWHE%HV + 0DJfiJ%"1H@ЁB%(dJҪRL$QCIxlgǤ!.4$hQ) C܈q,+arvN+Q8JPO#>~~%J)HLrC(Á96v߰0KjHK6 Mf ?)34%4Qu3k %Mā2#UiahJODOyC`?A QF%XH:9c%(Ҕ$1IAEJ -(T$~vABFzIfB'끲X2lp f4k\;zZnELJ$pDSKit tr,&HR' ]O\axsa!*8 l_*Z_ CN!Tq k"{@DvL0Kh(*> ̔aQ)eeO?5A yR)*CD4Ĵf͏;-X@A=Q7)%eU Ih0P0ac2UQCB7d) 7g@uQ_t_mu6QfeM3{U݁<?.޷6JgޖW\'k=L'=p&ӟa^FW:s޵XJGLa$dtRE*(qPd֨Y `"&GK,W@OF}lQk1dV\*{*0PctVuajBf"Y&:4eK*b "d4º\D,]˺6+m˪-s|b".(n"Kucm(]ztTGZ+6=9ByՐ.FɥEd 8#ܨX@W1WpʟbH}@@z~Xf}%Oa*@/=.?:G$mdp X}c2,4\UCePbWtG%=>$&0%mt~rB(%ީ}үP=N+bb:r;˜|j4I˟ѕ  !3@fNm?,cS!xpN!K: v/:YlI$$$ ' *@b?! `k0hIu)DD:Sl&%JJ) !(bBA(IV!嘘bJ ehZDRC*R!Д"@Jlg ~ !h6^ݰH_pgwmȵ #ASa8(ݑJqmZ!uVL:߯#6(JF,R$ -iZGbk:EltWNh *<4%<`6yC^$;Kvp `9i`J!.?{CsE11?%OhTi `έ*'6؍JAAD9 JT&Z !($)(Jh DbH@&a.a%( H A&d?NBlM@AsavQ. x 2R4RW=1{lI%8Q_dB|7;S.nCx<=\l;E?P4?*?FP=fEO6 P1Fin= 8 e[˃1H:F<0}M$̒#NA 7ڜ(DžTot`~l2^Z5ahPu00o06fi@2T yC.SPhE1Lma$xwQLj{{j:~x1;\߻c0g;mh=h-*oVi?#{/MIR4 IA}i˗HYWwp|(;6 tM'Z(D@E t*ۿV~ <`D^Yh =b֍"-Z"ؕ+!9$O ݐ e~H}'l\qÁA=ǂV" &98.'d|j L?].\#/ (dMpI~e4D}.k(ф8vHl[ e&s!ym%: (4*4&hF hBhP  H.`cxnq1'qg9c|umB6miC6tyL<)X|N?7}P/!'mV)_[PqTSF SJ`%1Yx_5H.ޣ-HHʘ4 ,Udaņ2#W!)8œIO$o) Ph햕ETc UvJ 7?c2w{;*E0xc*D0'שi(Q:WRr)ӊpd%4ٰp0(İ`kv3ѴB~lV .}eq:NE^F?]ZCAAIO٢ )8(O"5 ';f3+#u#l%W;)< I]RTun=:]ÄTʑoPt6;DE.B qGw cr q qNUIƉ%:~w#M">d 4[yy N:]IA&YShAp w7~$},dB(wgT  3"LN"PКM6RnL 2'̞tFHHIaI9Јvs2RBAå"!SF@>@q c /E!S׍D8L&;μT& pv;8JI (tY8 BA@$c`(W,pDN^8/|]^=x7G`CA,M;B9D\..nL>CI"t;^P#P+oLFClpa!['- Oh0G#<2 ei(l_rc|g812("Gۖ hh}ܦՎOAqB>; 'e@Ǜ) D=϶HxAӄ9n'Gg`8E`4AP qY@d`K;#"&F@4s'wcNSh\v"_#NrHNyԿ#/?gbꙭ}pIwf`=qY X H1>7ÜQ;U ycX(y| u- وg v"SXc ,&!4VD2Ue(hWn +9ŠOϥ2lYʠu4? t3l?9(`ڐ(! "9j0™qh!G)h \4U45O"޵ "ab_kD+SjU^ZD8/RH1""3`4eCJF#N}hSC:P'HvD{ D" 4?t 9 Qk8sb U9GO>'AL-QؒSTN&)! PBrH#.Bz%*=0]N(4(ċ:1T&1Pe肑:!'P9BE@RL*RHP A'&ܣJBd9¸I(9N"l?'&}"CC?B j_Eܟau(PU#B '=2C*;¢pIUq8lULREUQELQL3D$3PA̐Q11DEDV6bhbeOl^zG߸ U㣃B]- PBUKHϫ׽ʈ}YV*(I3-4)<ᘰ LɅ*1،GJT|'HyѦiLDL$`mGd')"{7Y#F?2䰯 O_I:%ϰnG2|BQ&UhiM5H)0f(Q"f>ZD` P(gb#}O/b@ O'13J/%Zv5ȵ7^ ?B)ۖNF39Ubr{u6vʊnʡ"dBQ%; 4T%N<hpgRe ~?:s R\س!3}i}tWk7ynlv#EgDA\F`ž(>~C$S 3-|? VQ!KQn'ԕjmfRujT ꌥt6^'|'WWsCQ h dm8ͳN]SdwAs !z/hmKtRXp]K?Ruܤ53o>)Oy;GMQ343^ MnEHG"S9p5cy3M.0ӭQ1P/KkOǫhcþ)4 E#=ᮽԪ]uJv̻V}u価$P[{HkJ*܁/6h/L8N*Z"D [-p?3G>08 _m546A7YuԻ"B7lM/o^lB6 ѣ |9*`>>y;zm+j{~ l HqR2p Õr\&kѡ!Zi!<=92D\{1oIG)N6YE"Q8?A L~P{sqH׬6gчFTff, Z<{A{@QM q-%+(~XۅŠJD45!+,LB V7{Mm+ qooc|v،. ԸEsnC Д $P%nE3DREDQ4HedD2nb]7C&8\ѶR" ɆPyvdO*6H1a48~GԌY#Xq֌5.'l6b0znzbz < Ib'h : lds(Ϩ{OCŜx`BDB0dv(^u@dRÚ[.Jfr'ʞYCNoCuѴ0TUA1DTC? :m h0;̎pz!Mi`=ܨ96 EQNrxa=r^{y4؍;v&""y)J+b-Ci%Tn5L> X@Mv/cX"c*Ym;Cdܑ|Cx= !lb}ÎPO?!L$O{x~ beOlC  cUأv6b_ieYș86!ݏ*AӕǣƃHOBҤâL1D7pi_i|Xd0AC^_5Drd3˗ہڪg dHXD6Cr"M`i(JV s"@ ?>twHN4f;ʫ)ڗEؿՙVV NBg.G GO5Dե]EF7eeTJQ .L ޛ,:D!Ӏcxέ"o79bKl6 <C& l%YppwO[-9/dITrj[P:Ǔw=R22 ֣e՚iVB њ5 'Ԉ4M - ]U Q1Ee*i+,iщ.*M^(QKy?rs7eeZ2˕nf\TU YN@L"6sWT 4iQ,zXxhh˸؛ &Xˑ; v"mˉlqPz#nu6 kPxMt ֍q٠/"\r=蓐ʈwq7hbx>^HhEfuBrf AχQ ޸փxtu] `#E~>s2HZī̺Ƌ>,HҨ >}0?@mi.4#:eS)4j gI =El8uIGfP4! 
sQ]K`&3'cBiAK()"¨#Kpo`4Nu(>p|?WukF3 -uTQy$GeCha0تDFr}d/L C`,8 E},*K}}9{cy2A!j 8T>~Jb030րG hwZ ;p1qwSR I#@8Bիy#NK<㎣<;^ƥ4JhSƠiC!gA %񖘇O L0qdx9Nru'f;:wf`Բજdd<įYVX]2$l.e=a$i f" >Jfh ff `iH fTb }B~ [zZ rD Gr3J w8iOfpe[}?殂'\=<ν؊7F ͌ݝu~~im#Pb9@@dK\dK%";3N!h1d򲄿%}4uV]AvSM;wVeP=w p,%1"iNBܞ'r;=ד$J=" SW DG&pi02paCِ=梜}ŵ3-@$rɆku rބn$0`/)_>a\à r&C w u&%E!;wuc$vIբ+Ec:DЏ!f5J{r7BfN:B2J2IN?6^v$C[r0]q6` QݔXP|c>M޹݇'hL>N4<ޔ}8O{'?@N(<8qÍWEG"V&FzB:yh"P"UR )F/ < q(2`4d : <'FCҤŒBB4٤! LsS3ܒNu:0MTJP)Bh)kw N LPR21$#0u7N @wd\$1=]@BlDN/tM"_xGhqÇ[a=pa0Qw& r c>s M1u\ gODcӼ{ĽyW;& BS3XdId!DDK<',JB 0Dʲ3DZL:^i=oxІ(ö/eX ET4~VxIq#PpuOYZ}jp 2 iN rp`&"A 8qGv ^nQ/ϯ=%U$kŐ݁m 9g[8ذ~Y jw>#a|! x='B,'!>d@fivOH:`ƨAe13L;'!mT-|LȓE#.a(Qp Qç'n-z`F c4MnٓbyIH#qs+i !@ủGd#l 1} m:*8lkqW: )ujSKt0‰1jI'k5~5Cu xn fzō.1a&y(v wh PŌ-F( 9< x^@(D<#їz݃MI;Wp|L_) 5L.z9?:/wz箐;dF P zjXxsD4U8|ÇiAnE LH5b4Z)JIXZ .e*?*|RNC,"PBB z}Ql/]%LP:C'^wzN{PY?tr%# raBR+WV#z S''0]~ |5 U) D nS? t(k`X!O M1% PR$$@*%&$CHi~W屑&Kԩ21+"OatCTȠN2ZThL3EldZA҄@.aL2.fQh\'b}!"ʸ>WϨH?(qLH^qA>)HH2@SڹO%ƟNg| ?ڛ$FC㗑y[T@$ KB;]2 گY PA%x'jn`cdPnNbC9~?!J=d?%C!ϪI52+-QP!݇dN !탃>M0{C8Ihkk B_~ν?w_:W#;G(^_ 0&ʞG0PR K0iPs(dBQ9'DCRLĀ{) 9`"W(M? DU$h4D$E[e%G#{dDPRDM)/ypѥG2Q(Hy;CJr/:cSH.TGSؑ?K(k*haHF z-sAJ̧ۀNcX4D$ %4MEz<͹?il'#WpW {  %={/AÃ# _ôS=q0'" ϙP!ˆ4X,@e5T ANCZY#D0 XacDk[$Qv_{M|w}8:wL-=Jor^۽lx #&6 f av+1\00,7sbOhuC]c1Sck94Ȗ9_/k yU צr.? WkN3^C;]qWxQ5:A->^`:2'T¹Y(Jф%uF#9LZr9O5-rSكvGb;zXED"M2]=O=oɿƹń;جUQ} 'Ot2n.!q< +Ah']0]+qՄ*Zd Cz0T_9nE $^p24vtjC9Ma5 Q3sz*Yg̜![[n0tz=o_kJn?~j=]n$f"Sùj^YZ(Θ1&*̣٬3EWrNFIt^³lxbUUMIF~&gn> OXx^!jpUɥދ(=_}q }^K !3~hZ!wG=,ИzRQ$@YÁKm!& =0IlGlKh<2ѝ= y>#k\eutT>![z=v [dq݃ӳ'V5k}YV!UV,ە6S}*<ʚи$"C1f!Pai4g[H_Q LKBi< d D"UT-1a*4B-U"$J 49~KNbs9Vs-w'{"0>KK3_4`uH:̴1E·F[v&q0?4bv3̥dC]27j.D1R~L9RG˙A_k B:Oaۣ͇w9ɉɬ %X)D/a)XZEiHbR(RVP!I`Osc@x< >=Mѡ$K^4Q^JMA)kѿVyq+$Tf~/t{;P'5@M1 h@(L(7h!M y5Ζ|y ~ ocvC!tȔy޴07`(`t'{< 6zu b BĬɴ;~NZSRu SɅ7nwduMg+ EJ~)Mݎ\)p,qݎ -^ByG(Unz0t6N7؊KL*hx"91.* Bo4 0H!/iA{"u $m܄AyhNv|:~'+t:|v ` 0,0D膊h171<_0ܹM(c)Ga*US=ζ玩>q#^0:0oӻ˻>skA8l:]m %jY1K^M~q=aޞ]?Tvr <3ŨNFC!l;%\ g~K%%%]KǶP4$`Ô}MԕkkL*U<\GyIfsQr!Ɉ-(PϟhIٓ&?"hJ`Qt=,әCQBb&YɝgFܚـnєgg8(h^b$O0tEUr ޢBvI(R;py >PdBݘwYd3~20lVs }p7m%  M>'BC]å9dU&}QE-xʸ$88ĘQ΄R2<;0\5p<B8њ؂ZBn,d&BH $ #" I$CS@HA> N0^"EY;N$ lpkN4+n$ƬqUn38T@@5`MӭER@z;hkH'FS. mlյ%1ULTDA3]y"*71~8cxz5:*)z"`Xj`^SUB;TĔHe dX%()gu2Ë=qzϵf#]<ᶍd/5iJ#O4 **nl"݄p;dC0aәڅU2œH[`QLKuDJKU%y2^x f{xd8L1 PDtFRg D X1 ;hR;o1# ?^;>G9 9LP)7*13ΦM(G[`H8⾌~E眡!;i6*ĦE)888D(&UDAÿ)3JAȿ-B8 S\"P*;)DRU5U3l Lyt\Iː I(NLSܕ9h.~>[Jz7>C3Et̯D4ƕĕJvA\Ue|3dl>z4HD!]qtȋEAcX׌71d'rz#~]Wxc;e75Gji89`Eak]k:fTFk~f Z5>88T&Ǫ]Zxqŀ Tlډ~P#qi@w dfI aJVTg"?W|prwD(0E8IxSS~0|ݍ\?!mD8&R#f SN&jl,v'2GWYL(F$N-!oEO? 
dB zsBCpa!\y rHvF q=w>A4t6xBJ HbG0ȼaRzSz:bA4n`}z"ȯp@؀J_ShԴ|d Q"J VE@B"b!2J w'!tqt #)-ЖܠH5 &@=(hCfXO@KR:E#;ϕ u^r@cIFөBV 0;)Ud=c7 GӐHa2˳ O'KI]cD׭gۗ86lHc;xף$ )KZ2h$x_\.coUlXKYSMPq =ʌE!6TN@/#`2~Ӹ8ےba?&nmw ր~:#ehA@F C?)b h0q}=u|)^iRrp9F8Lq;Nr@(b67q,E=nwn\78\̯f8r%wZCS%J&"3,p !`,9 @-onNnc6utj!ɳOW^@6&@nhnP2B0m ~]I0I0cm18Ӗ+ڝҧћ He?08xy:;~eh rYP(a P-"Xlzpv[aYU.X"O[p '>-fc&\[Jv&\ kaW C`~zνqG04iB/VxR-lV̆/gOmCKE Adʄ%*J=Jc ,wA]846q׏-CtG~m;z`t>N\>H% (Xg"&x_ A_a]wJAS l:Z*H%LS1kYUEUUUUUUUUUTUʊECLЈ|i8//S@q }:pd~w(mhc& ،<08w~0?p>4DLTI&$ \HhZ-U+|_}A &Ӿ#?8O3Lpd6.v `N IaBw~u+|pNcz &7o^%>Aj 8efBg#2;$W*߳*Pq>N }z$ RQ ˌso$82Yuc|ԍ`*@PG|&۬Y{H>tSJV;L~]‘")J϶ED?aL!QPD tHP4A#%2 tp$0/xݾ4Pza0 PFj"EcIrseqLN6B(Je1d?>H`~] W )Y:X֍8 H7FāH =l]>vH+ϓz /D0Ga xQ)Qf T'ۣ(hBTM ~O)@z;vzKUav2{S~0a" H0?2u  t^cB.?F @%(rɑ8@v*d"1 2 ЈJ(Q015 *t`)JpwH bCЀ D} rհ}XP )E _ l=Od^"^=N2_zR@}^CԹ>s~It-:#psx| `C1NqѤ98PM1$DF($QĪ(yk !Y&2G H'4¿ HMњz4'`&jкT-,{_7T<#J [o˫D)E4EB4P hvARpCbdTRTaje(#S5tPރ:p/n$<_,v0C );N7'a3rD!r5bնIQuqPx^pdpq;nC "ɰ h)]ĨgHWuq\'?(pIKI 1ce8hVJ"G7Mk$d@6304y!\TNdD,[ -B@IbU @{Mqa``n6-┴jSo2#\b>ܘh"B}Z=~9 t0bfD7vDp9S} Đ]w< ٪g(h5ѵ $ys9*Mϔx"OZY-]CnZRQ#D^..ܝ˸)p:렁SJtXSM*uӔ e ZRɘEJ`YPcBg͍xToszTe_uW|$ݩ'9AՋ ֦ |^mL3I╅& &ߩVYT1ODPz׏ݒ[ݪ"S٧1@Qu `DudB_pL?'"y3HT0 p{iyzZ8r([1:]R~kQvt9^0f毊Fvt^vYDibNzTn1߁&zدJY<&$ ᪰D;跫4 {Mrc";˔Fz3ט LQhUaȈ=4c(=i*8oTy9lizd^]4h=0k2 GN0i!k["YEiuSG7 }uo\3\{*)#m1͔:c7=–`CI.2$ANb9\NUמu~6cTGUoX==vye3@nb%uy3xww7K/ #pw=_4_Xw;1iJwLȴ%Y+aF "]Dq: 3qd[*PkU i{ !9 Of E++DM^]}8R\+5!\$!B@vd^D*=MhCa@ׯbCמ^^C~q3_)Ŋ |E)B̪؞qɀp`p}C<]kE*,yh JYUn7woTo-٭_zpFXMa斶Tkn`LzuEsmf󘿁;[_;hgV4qcl_܉P]`2p9q7FJ%}]9%{|s˟1^;\>)J:8!7AA 0ǛuW0ZXkd E Zo-\[AJӾin1z"`B I(i!B)D;auxضw) ت Ha?Px/G0=l+yEH+FжoF#mx4?YUgKq^ 84+FFufZA p8/t%es;<\=Y5f>~xhϏ3=.~'$Ucv/(E2(]1Nz›U:@$P J8q#f@`}x]Sa`  :=|`s:'i;L$?W#jj7S5H@L/pC3uIXQc\+Zv\Ӣ" XՔ2d㖧upe~nqL9ke ,ںڻW/};fH%d$a·#o`H嵑쁻:&^ nWuLl:|A ]rv$Rw3$'7 B&h00yQ zCDvH%V9(*ߍ|ў} -)uJޑ?{ F Z)>DN(0N\.< Bp+m 3ZX}Dw ^ 4`E8p_@,;^QR]Ѱ ߱R Ip:C[veD^'գ@|g5zH?6TWGLXʞ:2/&254{;`>aԈ`y< t.kcU&Dbr@ц g͊Q̝5;?pI"dG!ѕ8ÅËu(N=X侴?p_X}gO 0lD\LMc'w4F oNg#|0^CNCQ1$8Jy %[Ičd4m Dx0[i h@mA}Oە.{ImdŨbCU c(L͜af,\Œݩ֊VpfqC&gѲR:`3nV,c^w|)(C=2P˓@ A..Z`H";חNEJǰu"D ;8I QD$6=CזHo$Ԣ洛DD%!@=!f_I@ZAKwD!0A%IB EP2B2&WA=#k#>B e{(&Oy9{{Y@̑n^naiz[;I<\4eAMƈ!D`}pK@2,m DW r<`L \lJ9-j}cȽ[#]rZ҂,D-ab@aXc# ݥK=nu2[IE!>ih($~A27dEYޅEDQ~?D֟ag\?=L3NN ˅6 Jr-p#F qڂv/qi i//`J(,N$r :O5{e(ZI9=PEHLR!J]д PW rWD[ն@0L7KT_-?<450H_Q:&_a{~gfۍ΁t7Z=J(zt)$0Np9l%QhQg"qL<3vN #  _R`/%80Hp& Jd6 g؆wypqF E&;kcx `O 9y̋&=p!?'}`2d33̆' 4(, 01}?O Lp&N"#+#C_ձA(S0H(:ƈeV6X]h#iєʚy#>ts 6*Nx{(:vxR)%^~YD)ӕӫuq7{cC17ƁuEGF.WS 1H=z L>#h$z[B' h%R܀AǴ (*V,\3C]r"UUFF쓩z#2Dga&C=3$,)e!mdgUl[".j*é,"D TӡAUS#"(wvC&s[suHffa`x;[ar.v '8jUA9Bsdb$.RÐrm" DFTE3H+*R8s1!S$Ddk#v"j9٪FEHiy AAVLN>7" Qv.di(8ĩ/` AĒ˵Dĝc!A0GM{ *,--/yD*x!"8L/u>CC ]a{غPh8=X"U/mI]c i~c/0=ǗOa">(և q"BA0Ѱ~&O8A(Jw\+?աNo@ 3<ݿ'0ME.Y8d&ЈUbZyvIV)4 l{']#~p}z%=2D~l ĸT4 p{8h Q*ty כ;Vn`0BFێ ôbL0+ILgnf8b )\byÓq&Btƥd#c/0 DEVK;t  #j*b>6BY'zanP I aA!C? OJJm'-!A>>&# 8]|*a&UE*7`yP1;jB;b>%\I) /և9qiYފ~` M7qmZKݭTC65R]7wa5*N o4ou'5ƺU3YɂkrN5y*8{u Ih]Ji(I("MDvs]:QX"F7RIhI>lMYR=DH3{2xXhGzat 6tY]FF ]jAfmBځ`ej޽:DPRAJ+nΈ/5< K"Ha8yd.L+ D(P83L"_<:@f7ըҩB]Qag$1 ]F61:ۡˋi{$%av.Jae."0IͱJJSj>~Shϟ91o[,i嬵!o>o 0ہh&peElcf}t]E$ezH. <TB). 
4Dߒny@/(~,哶B`0O6,[b oz>Օ/уg.J"< ӺikXNצYE82/3Aİtygh0Lg[fۥ_YK/-D`Ylv85p?@^k+⠵՜$!/`xp c."BHZSͅ y0wZ5 H#87Z;η0+,`t=EA4DYˣ|uJD'Ø$F2 uv:IJm IUC\6 BjM?={ˮUUn-P*o=oZL7 k )9\q)vli5W~fTiu'Nf|D̢4n{!Mmig.چ<:!*)(m3J|e.EYV\i'ݸ(HTI$0'G+R-ݑ x=h~_7y@^C=Ǚft="j58՝17ˣ3љ^UUۮlJ1rkň9 ;Aj{qc"UiiPp#>X@F*i''zKrzLٺC:#=NĪӑ\M,,e{j *.xqt;ȘҠ E26ZkKg<-", t𔣤Պ*y]G%~Ngj vZ8 a#/29ћ5T)R e)NXbT^LcjsB#0 vfCZpߠ@q0FuJwtgZc9_B Î Y4¨0A)QPV]y+Pȴjw+]_D}JuKMO$C%3gwt$MAA1razloߜpAO޳3sFu5Ewvz[0U=M62}3YgF: VtWVJ]>T*x*.:jN|shVŦi5Z}H=gj٨:_,鮷oڅ|t^Re/m$Zd-4+aol7#a]nv?f( _CWILS[g9C絆Eo.՘iJB<1ĖĽmTry@iZ=ױG6sIx$$qBJ FNWP0(%J =3xÿop}a6X3Z;L>0Y;GY=evr~EhG}WG!J,QVv f%l#\hBqQG=N0ưh؆61Gqt/+ye=6:7o[]Z7,eKAx@-y=Ȩu"hbbIDWei˼HncchLnϞَ5$?бˇbPH;imH6/ BdZiD4b9C!W.tuS6Mq7see'/!~zј"|72ǒ2: ,_a|gղvn vCez8QTNЍP2B>@e,R>z}g9w}iӔp h"G7#ccx 90N bD8] |)"~LcZ / epHav H;3Ţ-%4XorN=?uKa \_I|=1|!R@:Jy>|x@8#B2n&*#$KPѿ\-윟gxaasn ?,0qQHcw|ݜj>Cv z("vHBH  '=e61;7 >A ",""H P""juN!N<8rDg957@ipllftDи<6jd@ Z,6`maG{0IՉ R%!䎰]w >||Gg&@z"!p8:oD^ES@ pZiB i;>0vʞH#.1iH"I~l)>빘<]Ļ8f0c^Kh6'Wmd7WH;ݮGKlE=&iY8wyl*K^H[ kl#gmjbbHX!Ϯ/:H_Eڶ~3j2m@J nIGZaWua\&Z6,0 1tkZ,s G >X,] %*RDD7x Ɉ kcţ[v:*hsS5B_4"V]_rs5'|_ Fceԓ4@S+F Yz`ѐ_I󶋟n@NlEy3[5fmOE $`/CMØ=8>9u>]ϔ&hB3Ɗd4-*D+H'ؒns?m1r;T1 rEA);{ yD̆VGއQSP"(E}p ?\ ;2;JQa[r114gF'w#Jl'ʞz:?BKۉM?' 3/is~Q_-," HLŃL짃,99R) >n/OЈ( JcX1 <{H7ۓ C菇s:[wv$"0 ybhh=wy`l÷ZCOola_:t`U;B^gC1s3E1>{k2Ȇc1T V++EdQ0dhpwې/85l2^ t9Q%q4([sdnJ9"43&%6+ygCJ| A2Z6p8S'Pif"}l'@uzՇyYyaT͆wq]AE;DcӌH| GxI5 (o]A(wv:u<[wت?;PETeHp .8'K98Șѻow7spsZ~A:2iaDA&77cq^rx[ض6܇JځΕe-A;on@݈Sϡ ŀPQplp~@vSw^Wq8#"=t|&D&ܛqwzZ |)yT}+Tefc&H&'`A o_/ iNH:>N1q#ܰp'E b.*6u*oq~SC-]߆ i0Q#q d 49c^qۛ8;DHHL AG9 C0DlfaVKINNY퇉 isS0$r!ډIՃnO3JuhMF1>_(`maNs4+LŠ;&SWMNn9  fr!fl&+!0Qp,bpy=U\N3mCoC$]b(ͱGs2mS4ςG@*V,9r42 3V4 B7vCJ@90 s &5]F|5%B&;drS01ᜅAJĦC ADdM.9aď* 0@RԌJҁuq}?B\$`:841t+1EP$DN[ <8a, oSm{:T1PR`٠xsÅpsD"5TFY9D6YCY i⑻IR >EFF$0eƍ:kUn:^3Ay|95:2r#7[[t[\ʮU6n|X?\ RǕ)x30Ô(VI?LAL>fƎ{#7-|gdJ Q2K8R&qj\ε[v֯g;e|wQg'6MU Qx lIP1$K1R.ƬKeww fU’PyK(zEP\jRhR-~p\~;'SVՐ);[kctE"'_VUB R&HÏ0G'eۉPʒŀlHdBbg9ʼnY P&D|A6Ol|Pۊ hÌ~!Ŏ޿?F!6`7d zrxvxfk??^BpP*jzng'’/*Q",iTTLut/R){]T;U2U!Z=0dQv#?u3AOT30l~oe]$A!d\ HX$]u&!ua+!~< c U ()BWPDGkL& @ی>;EU @6/9NpidzpiXɐM_[QtJ/tI -f1VlO)?$S$Tbd3Pdb4!`Pߵ bS`z0FW1*z)2g'Țꄞo1kv Hpa?e( Yu˃0H';osy*DXqɓp;a7B̭; aɴ,!;:Q7uRAʂ'h/"@CXgQ)H 2 a›^xCn zbB 'ȿ?=}_96B!b1"W1מ¾lmY Ŏ=?H4#$`$1UrGQ)1 %C ?2TNL:I;Sb8S cQ&40߅{CBRH_D6Cӣ#?}տtl3BtQ2x.)  ?!J~1%+MP ~8 %z$h("*`i&#ôSXlIES@ |8aI90t/EBJf <}EaIScL یsa<љVbC?^iS?0WTҔ89D԰gӸmIu(:9yfXx70w{3j& GUܑaŅ8Th؁$RD4L 2A0  *h3π!>0II!ӸSfA?Pdեm<|!-K@EdQ*raiev&XqgJ'AQ[Aq\TT˖h]6ȜcB6GN;Jz3weiWg8.cq0P!N"'Xݻ@chv$"WCLG&f)iyL 8@Ѐc!xn!7\tEAhIGK+腂 LE9'BqNHAL0{6AvNthߢ114Y˨t/~ G`mp_XTƍXaf?.!61RX2 z t|u;pDFdž<8b&9' RKd,!8ܙβVF2 Cse(t 7[/2 ̦!X)jX+"ILJMP +$M EQF"8AT㿑ܩ|(aLt.զ>oOgh~7C̤꺃TIpL%!LUTeo^Iܬt9:u;rDeM! *q?kyT1Ĉ$ZPp.p A>8;>Y+KK.>{h჉sdI:n HShh~0mgwY\2ڰ&bHT YCcgK!xIvHHT ~qFf"vhQ5$"w۠@0!z ? xso0 ;|ħTC80$SU-̧{hw[/?.h@D7J<3'^ T8gBnNbDRJ!/oU!$N/BM{ }.݇>G2zQQaO$HO:Qs9܍چ,.Z$5=~]K_B]J:'K{ H,#.&`а%*0'4x~$2l*v>>bA /vGw(2͈N("*s.4nI@<{IB[fѧ.3hW,;?;,kh^R8G<4*;D!x eO8 $>9ySؽEaѦJ=Bn%la_EOhydXH0()cmQݓSUiLIᏬqҎS2\Ϋy7'Cp(xF4A\˅YV׹1l[W.*p fWPDzNv OBq n\0(E%T^L@:Zu5xW_#EN9D*zOC }4RM˱EB:8aG__%Iw/WR#Ģ& AQ @Ĕ*%&  (*%fF (&)%DJJ)*&)%hBb$((ffX*!)(b]B &HtQ1- ?͏\k=ϸ?\ #? 
patches/kbuild.patch0000664000076400007640000000134311160524406013530 0ustar tglxtglx
---
 scripts/Kbuild.include |    5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

Index: linux-2.6-tip/scripts/Kbuild.include
===================================================================
--- linux-2.6-tip.orig/scripts/Kbuild.include
+++ linux-2.6-tip/scripts/Kbuild.include
@@ -98,8 +98,9 @@ as-option = $(call try-run,\

 # as-instr
 # Usage: cflags-y += $(call as-instr,instr,option1,option2)
-as-instr = $(call try-run,\
-	echo -e "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
+as-instr = $(call try-run, \
+	echo -e "$(1)" > "$$TMP"; \
+	$(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" "$$TMP",$(2),$(3))

 # cc-option
 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)

patches/revert-sched-changes.patch0000664000076400007640000000327411160544617016272 0ustar tglxtglx
Subject: revert-sched-changes.patch
From: Thomas Gleixner
Date: Sat, 14 Mar 2009 11:22:29 +0100

Signed-off-by: Thomas Gleixner
---
 kernel/mutex.c |    4 +---
 kernel/sched.c |   12 ++++++++----
 2 files changed, 9 insertions(+), 7 deletions(-)

Index: linux-2.6-tip/kernel/mutex.c
===================================================================
--- linux-2.6-tip.orig/kernel/mutex.c
+++ linux-2.6-tip/kernel/mutex.c
@@ -248,9 +248,7 @@ __mutex_lock_common(struct mutex *lock,

 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
+		__schedule();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}

Index: linux-2.6-tip/kernel/sched.c
===================================================================
--- linux-2.6-tip.orig/kernel/sched.c
+++ linux-2.6-tip/kernel/sched.c
@@ -4773,15 +4773,13 @@ pick_next_task(struct rq *rq)

 /*
  * schedule() is the main scheduler function.
*/ -asmlinkage void __sched schedule(void) +asmlinkage void __sched __schedule(void) { struct task_struct *prev, *next; unsigned long *switch_count; struct rq *rq; int cpu; -need_resched: - preempt_disable(); cpu = smp_processor_id(); rq = cpu_rq(cpu); rcu_qsctr_inc(cpu); @@ -4839,9 +4837,15 @@ need_resched_nonpreemptible: if (unlikely(reacquire_kernel_lock(current) < 0)) goto need_resched_nonpreemptible; +} +asmlinkage void __sched schedule(void) +{ +need_resched: + preempt_disable(); + __schedule(); preempt_enable_no_resched(); - if (need_resched()) + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; } EXPORT_SYMBOL(schedule); patches/revert-bkl-cleanup.patch0000664000076400007640000000126411155175736015776 0ustar tglxtglxSubject: revert-bkl-cleanup.patch From: Thomas Gleixner Date: Mon, 09 Mar 2009 12:20:55 +0100 Signed-off-by: Thomas Gleixner --- lib/kernel_lock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/lib/kernel_lock.c =================================================================== --- linux-2.6-tip.orig/lib/kernel_lock.c +++ linux-2.6-tip/lib/kernel_lock.c @@ -39,7 +39,7 @@ static __cacheline_aligned_in_smp DEFIN int __lockfunc __reacquire_kernel_lock(void) { while (!_raw_spin_trylock(&kernel_flag)) { - if (need_resched()) + if (test_thread_flag(TIF_NEED_RESCHED)) return -EAGAIN; cpu_relax(); } patches/revert-preempt-bkl-revert.patch0000664000076400007640000002522511160544617017325 0ustar tglxtglxSubject: patches/revert-preempt-bkl-revert.patch [ basically, this is the -R of 8e3e076c5a78519a9f64cd384e8f18bc21882ce0 ] commit 8e3e076c5a78519a9f64cd384e8f18bc21882ce0 Author: Linus Torvalds Date: Sat May 10 20:58:02 2008 -0700 BKL: revert back to the old spinlock implementation The generic semaphore rewrite had a huge performance regression on AIM7 (and potentially other BKL-heavy benchmarks) because the generic semaphores had been rewritten to be simple to understand and fair. The latter, in particular, turns a semaphore-based BKL implementation into a mess of scheduling. The attempt to fix the performance regression failed miserably (see the previous commit 00b41ec2611dc98f87f30753ee00a53db648d662 'Revert "semaphore: fix"'), and so for now the simple and sane approach is to instead just go back to the old spinlock-based BKL implementation that never had any issues like this. This patch also has the advantage of being reported to fix the regression completely according to Yanmin Zhang, unlike the semaphore hack which still left a couple percentage point regression. As a spinlock, the BKL obviously has the potential to be a latency issue, but it's not really any different from any other spinlock in that respect. We do want to get rid of the BKL asap, but that has been the plan for several years. These days, the biggest users are in the tty layer (open/release in particular) and Alan holds out some hope: "tty release is probably a few months away from getting cured - I'm afraid it will almost certainly be the very last user of the BKL in tty to get fixed as it depends on everything else being sanely locked." so while we're not there yet, we do have a plan of action. 
Tested-by: Yanmin Zhang Cc: Ingo Molnar Cc: Andi Kleen Cc: Matthew Wilcox Cc: Alexander Viro Cc: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Ingo Molnar --- arch/mn10300/Kconfig | 11 ++++ include/linux/hardirq.h | 18 +++---- kernel/sched.c | 27 +++++++++- lib/kernel_lock.c | 120 +++++++++++++++--------------------------------- 4 files changed, 81 insertions(+), 95 deletions(-) Index: linux-2.6-tip/arch/mn10300/Kconfig =================================================================== --- linux-2.6-tip.orig/arch/mn10300/Kconfig +++ linux-2.6-tip/arch/mn10300/Kconfig @@ -186,6 +186,17 @@ config PREEMPT Say Y here if you are building a kernel for a desktop, embedded or real-time system. Say N if you are unsure. +config PREEMPT_BKL + bool "Preempt The Big Kernel Lock" + depends on PREEMPT + default y + help + This option reduces the latency of the kernel by making the + big kernel lock preemptible. + + Say Y here if you are building a kernel for a desktop system. + Say N if you are unsure. + config MN10300_CURRENT_IN_E2 bool "Hold current task address in E2 register" default y Index: linux-2.6-tip/include/linux/hardirq.h =================================================================== --- linux-2.6-tip.orig/include/linux/hardirq.h +++ linux-2.6-tip/include/linux/hardirq.h @@ -84,14 +84,6 @@ */ #define in_nmi() (preempt_count() & NMI_MASK) -#if defined(CONFIG_PREEMPT) -# define PREEMPT_INATOMIC_BASE kernel_locked() -# define PREEMPT_CHECK_OFFSET 1 -#else -# define PREEMPT_INATOMIC_BASE 0 -# define PREEMPT_CHECK_OFFSET 0 -#endif - /* * Are we running in atomic context? WARNING: this macro cannot * always detect atomic context; in particular, it cannot know about @@ -99,11 +91,17 @@ * used in the general case to determine whether sleeping is possible. * Do not use in_atomic() in driver code. 
*/ -#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE) +#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) + +#ifdef CONFIG_PREEMPT +# define PREEMPT_CHECK_OFFSET 1 +#else +# define PREEMPT_CHECK_OFFSET 0 +#endif /* * Check whether we were atomic before we did preempt_disable(): - * (used by the scheduler, *after* releasing the kernel lock) + * (used by the scheduler) */ #define in_atomic_preempt_off() \ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET) Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -4920,6 +4920,8 @@ out: asmlinkage void __sched preempt_schedule(void) { struct thread_info *ti = current_thread_info(); + struct task_struct *task = current; + int saved_lock_depth; /* * If there is a non-zero preempt_count or interrupts are disabled, @@ -4930,7 +4932,16 @@ asmlinkage void __sched preempt_schedule do { add_preempt_count(PREEMPT_ACTIVE); + + /* + * We keep the big kernel semaphore locked, but we + * clear ->lock_depth so that schedule() doesnt + * auto-release the semaphore: + */ + saved_lock_depth = task->lock_depth; + task->lock_depth = -1; schedule(); + task->lock_depth = saved_lock_depth; sub_preempt_count(PREEMPT_ACTIVE); /* @@ -4951,15 +4962,26 @@ EXPORT_SYMBOL(preempt_schedule); asmlinkage void __sched preempt_schedule_irq(void) { struct thread_info *ti = current_thread_info(); + struct task_struct *task = current; + int saved_lock_depth; /* Catch callers which need to be fixed */ BUG_ON(ti->preempt_count || !irqs_disabled()); do { add_preempt_count(PREEMPT_ACTIVE); + + /* + * We keep the big kernel semaphore locked, but we + * clear ->lock_depth so that schedule() doesnt + * auto-release the semaphore: + */ + saved_lock_depth = task->lock_depth; + task->lock_depth = -1; local_irq_enable(); schedule(); local_irq_disable(); + task->lock_depth = saved_lock_depth; sub_preempt_count(PREEMPT_ACTIVE); /* @@ -6317,11 +6339,8 @@ void __cpuinit init_idle(struct task_str spin_unlock_irqrestore(&rq->lock, flags); /* Set the preempt count _outside_ the spinlocks! */ -#if defined(CONFIG_PREEMPT) - task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); -#else task_thread_info(idle)->preempt_count = 0; -#endif + /* * The idle tasks have their own, simple scheduling class: */ Index: linux-2.6-tip/lib/kernel_lock.c =================================================================== --- linux-2.6-tip.orig/lib/kernel_lock.c +++ linux-2.6-tip/lib/kernel_lock.c @@ -11,121 +11,79 @@ #include /* - * The 'big kernel lock' + * The 'big kernel semaphore' * - * This spinlock is taken and released recursively by lock_kernel() + * This mutex is taken and released recursively by lock_kernel() * and unlock_kernel(). It is transparently dropped and reacquired * over schedule(). It is used to protect legacy code that hasn't * been migrated to a proper locking design yet. * + * Note: code locked by this semaphore will only be serialized against + * other code using the same locking facility. The code guarantees that + * the task remains on the same CPU. + * * Don't use in new code. */ -static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); - +static DECLARE_MUTEX(kernel_sem); /* - * Acquire/release the underlying lock from the scheduler. + * Re-acquire the kernel semaphore. 
* - * This is called with preemption disabled, and should - * return an error value if it cannot get the lock and - * TIF_NEED_RESCHED gets set. + * This function is called with preemption off. * - * If it successfully gets the lock, it should increment - * the preemption count like any spinlock does. - * - * (This works on UP too - _raw_spin_trylock will never - * return false in that case) + * We are executing in schedule() so the code must be extremely careful + * about recursion, both due to the down() and due to the enabling of + * preemption. schedule() will re-check the preemption flag after + * reacquiring the semaphore. */ int __lockfunc __reacquire_kernel_lock(void) { - while (!_raw_spin_trylock(&kernel_flag)) { - if (test_thread_flag(TIF_NEED_RESCHED)) - return -EAGAIN; - cpu_relax(); - } + struct task_struct *task = current; + int saved_lock_depth = task->lock_depth; + + BUG_ON(saved_lock_depth < 0); + + task->lock_depth = -1; + preempt_enable_no_resched(); + + down(&kernel_sem); + preempt_disable(); + task->lock_depth = saved_lock_depth; + return 0; } void __lockfunc __release_kernel_lock(void) { - _raw_spin_unlock(&kernel_flag); - preempt_enable_no_resched(); + up(&kernel_sem); } /* - * These are the BKL spinlocks - we try to be polite about preemption. - * If SMP is not on (ie UP preemption), this all goes away because the - * _raw_spin_trylock() will always succeed. + * Getting the big kernel semaphore. */ -#ifdef CONFIG_PREEMPT -static inline void __lock_kernel(void) +void __lockfunc lock_kernel(void) { - preempt_disable(); - if (unlikely(!_raw_spin_trylock(&kernel_flag))) { - /* - * If preemption was disabled even before this - * was called, there's nothing we can be polite - * about - just spin. - */ - if (preempt_count() > 1) { - _raw_spin_lock(&kernel_flag); - return; - } + struct task_struct *task = current; + int depth = task->lock_depth + 1; + if (likely(!depth)) /* - * Otherwise, let's wait for the kernel lock - * with preemption enabled.. + * No recursion worries - we set up lock_depth _after_ */ - do { - preempt_enable(); - while (spin_is_locked(&kernel_flag)) - cpu_relax(); - preempt_disable(); - } while (!_raw_spin_trylock(&kernel_flag)); - } -} + down(&kernel_sem); -#else - -/* - * Non-preemption case - just get the spinlock - */ -static inline void __lock_kernel(void) -{ - _raw_spin_lock(&kernel_flag); + task->lock_depth = depth; } -#endif -static inline void __unlock_kernel(void) +void __lockfunc unlock_kernel(void) { - /* - * the BKL is not covered by lockdep, so we open-code the - * unlocking sequence (and thus avoid the dep-chain ops): - */ - _raw_spin_unlock(&kernel_flag); - preempt_enable(); -} + struct task_struct *task = current; -/* - * Getting the big kernel lock. - * - * This cannot happen asynchronously, so we only need to - * worry about other CPU's. - */ -void __lockfunc lock_kernel(void) -{ - int depth = current->lock_depth+1; - if (likely(!depth)) - __lock_kernel(); - current->lock_depth = depth; -} + BUG_ON(task->lock_depth < 0); -void __lockfunc unlock_kernel(void) -{ - BUG_ON(current->lock_depth < 0); - if (likely(--current->lock_depth < 0)) - __unlock_kernel(); + if (likely(--task->lock_depth < 0)) + up(&kernel_sem); } EXPORT_SYMBOL(lock_kernel); patches/rt_mutex_setprio.patch0000664000076400007640000000442211160544617015701 0ustar tglxtglxSubject: rt: rename rt_mutex_setprio to task_setprio With there being multiple non-mutex users of this function its past time it got renamed. 
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/sched.h | 7 ++++++- kernel/sched.c | 12 +++++------- 2 files changed, 11 insertions(+), 8 deletions(-) Index: linux-2.6-tip/include/linux/sched.h =================================================================== --- linux-2.6-tip.orig/include/linux/sched.h +++ linux-2.6-tip/include/linux/sched.h @@ -1780,9 +1780,14 @@ int sched_rt_handler(struct ctl_table *t extern unsigned int sysctl_sched_compat_yield; +extern void task_setprio(struct task_struct *p, int prio); + #ifdef CONFIG_RT_MUTEXES extern int rt_mutex_getprio(struct task_struct *p); -extern void rt_mutex_setprio(struct task_struct *p, int prio); +static inline void rt_mutex_setprio(struct task_struct *p, int prio) +{ + task_setprio(p, prio); +} extern void rt_mutex_adjust_pi(struct task_struct *p); #else static inline int rt_mutex_getprio(struct task_struct *p) Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -5327,19 +5327,19 @@ long __sched sleep_on_timeout(wait_queue } EXPORT_SYMBOL(sleep_on_timeout); -#ifdef CONFIG_RT_MUTEXES - /* - * rt_mutex_setprio - set the current priority of a task + * task_setprio - set the current priority of a task * @p: task * @prio: prio value (kernel-internal form) * * This function changes the 'effective' priority of a task. It does * not touch ->normal_prio like __setscheduler(). * - * Used by the rt_mutex code to implement priority inheritance logic. + * Used by the rt_mutex code to implement priority inheritance logic + * and by rcupreempt-boost to boost priorities of tasks sleeping + * with rcu locks. */ -void rt_mutex_setprio(struct task_struct *p, int prio) +void task_setprio(struct task_struct *p, int prio) { unsigned long flags; int oldprio, on_rq, running; @@ -5376,8 +5376,6 @@ void rt_mutex_setprio(struct task_struct task_rq_unlock(rq, &flags); } -#endif - void set_user_nice(struct task_struct *p, long nice) { int old_prio, delta, on_rq; patches/posix-timers-prevent-broadcast-signals.patch0000664000076400007640000000224111150327144021775 0ustar tglxtglxSubject: posix-timers-prevent-broadcast-signals.patch From: Thomas Gleixner Date: Thu, 05 Feb 2009 17:38:20 +0100 Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/posix-timers.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) Index: linux-2.6-tip/kernel/posix-timers.c =================================================================== --- linux-2.6-tip.orig/kernel/posix-timers.c +++ linux-2.6-tip/kernel/posix-timers.c @@ -420,6 +420,7 @@ static enum hrtimer_restart posix_timer_ static struct pid *good_sigevent(sigevent_t * event) { struct task_struct *rtn = current->group_leader; + int sig = event->sigev_signo; if ((event->sigev_notify & SIGEV_THREAD_ID ) && (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || @@ -428,7 +429,8 @@ static struct pid *good_sigevent(sigeven return NULL; if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && - ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) + (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) || + sig_kernel_coredump(sig))) return NULL; return task_pid(rtn); patches/qrcu.patch0000664000076400007640000001463511150327144013237 0ustar tglxtglxSubject: QRCU with lockless fastpath From: "Paul E. McKenney" Hello! 
This is an updated version of Oleg Nesterov's QRCU that avoids the earlier lock acquisition on the synchronize_qrcu() fastpath. This passes rcutorture on x86 and the weakly ordered POWER. A promela model of the code passes as noted before for 2 readers and 3 updaters and for 3 readers and 2 updaters. 3 readers and 3 updaters runs every machine that I have access to out of memory -- nothing like a little combinatorial explosion! However, after some thought, the proof ended up being simple enough: 1. If synchronize_qrcu() exits too soon, then by definition there has been a reader present during synchronize_srcu()'s full execution. 2. The counter corresponding to this reader will be at least 1 at all times. 3. The synchronize_qrcu() code forces at least one of the counters to be at least one at all times -- if there is a reader, the sum will be at least two. (Unfortunately, we cannot fetch the pair of counters atomically.) 4. Therefore, the only way that synchronize_qrcu()s fastpath can see a sum of 1 is if it races with another synchronize_qrcu() -- the first synchronize_qrcu() must read one of the counters before the second synchronize_qrcu() increments it, and must read the other counter after the second synchronize_qrcu() decrements it. There can be at most one reader present through this entire operation -- otherwise, the first synchronize_qrcu() will see a sum of 2 or greater. 5. But the second synchronize_qrcu() will not release the mutex until after the reader is done. During this time, the first synchronize_qrcu() will always see a sum of at least 2, and therefore cannot take the remainder of the fastpath until the reader is done. 6. Because the second synchronize_qrcu() holds the mutex, no other synchronize_qrcu() can manipulate the counters until the reader is done. A repeat of the race called out in #4 above therefore cannot happen until after the reader is done, in which case it is safe for the first synchronize_qrcu() to proceed. Therefore, two summations of the counter separated by a memory barrier suffices and the implementation shown below also suffices. (And, yes, the fastpath -could- check for a sum of zero and exit immediately, but this would help only in case of a three-way race between two synchronize_qrcu()s and a qrcu_read_unlock(), would add another compare, so is not worth it.) Signed-off-by: Paul E. McKenney Signed-off-by: Ingo Molnar --- include/linux/srcu.h | 22 +++++++++++++ kernel/srcu.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 108 insertions(+) Index: linux-2.6-tip/include/linux/srcu.h =================================================================== --- linux-2.6-tip.orig/include/linux/srcu.h +++ linux-2.6-tip/include/linux/srcu.h @@ -27,6 +27,8 @@ #ifndef _LINUX_SRCU_H #define _LINUX_SRCU_H +#include + struct srcu_struct_array { int c[2]; }; @@ -50,4 +52,24 @@ void srcu_read_unlock(struct srcu_struct void synchronize_srcu(struct srcu_struct *sp); long srcu_batches_completed(struct srcu_struct *sp); +/* + * fully compatible with srcu, but optimized for writers. 
+ */ + +struct qrcu_struct { + int completed; + atomic_t ctr[2]; + wait_queue_head_t wq; + struct mutex mutex; +}; + +int init_qrcu_struct(struct qrcu_struct *qp); +int qrcu_read_lock(struct qrcu_struct *qp); +void qrcu_read_unlock(struct qrcu_struct *qp, int idx); +void synchronize_qrcu(struct qrcu_struct *qp); + +static inline void cleanup_qrcu_struct(struct qrcu_struct *qp) +{ +} + #endif Index: linux-2.6-tip/kernel/srcu.c =================================================================== --- linux-2.6-tip.orig/kernel/srcu.c +++ linux-2.6-tip/kernel/srcu.c @@ -255,3 +255,89 @@ EXPORT_SYMBOL_GPL(srcu_read_lock); EXPORT_SYMBOL_GPL(srcu_read_unlock); EXPORT_SYMBOL_GPL(synchronize_srcu); EXPORT_SYMBOL_GPL(srcu_batches_completed); + +int init_qrcu_struct(struct qrcu_struct *qp) +{ + qp->completed = 0; + atomic_set(qp->ctr + 0, 1); + atomic_set(qp->ctr + 1, 0); + init_waitqueue_head(&qp->wq); + mutex_init(&qp->mutex); + + return 0; +} + +int qrcu_read_lock(struct qrcu_struct *qp) +{ + for (;;) { + int idx = qp->completed & 0x1; + if (likely(atomic_inc_not_zero(qp->ctr + idx))) + return idx; + } +} + +void qrcu_read_unlock(struct qrcu_struct *qp, int idx) +{ + if (atomic_dec_and_test(qp->ctr + idx)) + wake_up(&qp->wq); +} + +void synchronize_qrcu(struct qrcu_struct *qp) +{ + int idx; + + smp_mb(); /* Force preceding change to happen before fastpath check. */ + + /* + * Fastpath: If the two counters sum to "1" at a given point in + * time, there are no readers. However, it takes two separate + * loads to sample both counters, which won't occur simultaneously. + * So we might race with a counter switch, so that we might see + * ctr[0]==0, then the counter might switch, then we might see + * ctr[1]==1 (unbeknownst to us because there is a reader still + * there). So we do a read memory barrier and recheck. If the + * same race happens again, there must have been a second counter + * switch. This second counter switch could not have happened + * until all preceding readers finished, so if the condition + * is true both times, we may safely proceed. + * + * This relies critically on the atomic increment and atomic + * decrement being seen as executing in order. + */ + + if (atomic_read(&qp->ctr[0]) + atomic_read(&qp->ctr[1]) <= 1) { + smp_rmb(); /* Keep two checks independent. */ + if (atomic_read(&qp->ctr[0]) + atomic_read(&qp->ctr[1]) <= 1) + goto out; + } + + mutex_lock(&qp->mutex); + + idx = qp->completed & 0x1; + if (atomic_read(qp->ctr + idx) == 1) + goto out_unlock; + + atomic_inc(qp->ctr + (idx ^ 0x1)); + + /* + * Prevent subsequent decrement from being seen before previous + * increment -- such an inversion could cause the fastpath + * above to falsely conclude that there were no readers. Also, + * reduce the likelihood that qrcu_read_lock() will loop. + */ + + smp_mb__after_atomic_inc(); + qp->completed++; + + atomic_dec(qp->ctr + idx); + __wait_event(qp->wq, !atomic_read(qp->ctr + idx)); +out_unlock: + mutex_unlock(&qp->mutex); +out: + smp_mb(); /* force subsequent free after qrcu_read_unlock(). 
*/ +} + +EXPORT_SYMBOL_GPL(init_qrcu_struct); +EXPORT_SYMBOL_GPL(qrcu_read_lock); +EXPORT_SYMBOL_GPL(qrcu_read_unlock); +EXPORT_SYMBOL_GPL(synchronize_qrcu); patches/spinlock-trylock-cleanup-sungem.patch0000664000076400007640000000141611150327144020506 0ustar tglxtglxSubject: spinlock: trylock cleanup sungem From: Ingo Molnar Date: Wed Feb 04 00:03:15 CET 2009 Signed-off-by: Ingo Molnar --- drivers/net/sungem.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) Index: linux-2.6-tip/drivers/net/sungem.c =================================================================== --- linux-2.6-tip.orig/drivers/net/sungem.c +++ linux-2.6-tip/drivers/net/sungem.c @@ -1032,10 +1032,8 @@ static int gem_start_xmit(struct sk_buff (csum_stuff_off << 21)); } - local_irq_save(flags); - if (!spin_trylock(&gp->tx_lock)) { + if (!spin_trylock_irqsave(&gp->tx_lock, flags)) { /* Tell upper layer to requeue */ - local_irq_restore(flags); return NETDEV_TX_LOCKED; } /* We raced with gem_do_stop() */ patches/x86_64-tsc-sync-irqflags-fix.patch0000664000076400007640000000163411150327144017351 0ustar tglxtglxSubject: x86_64: tsc sync irqflags fix From: Ingo Molnar Date: Wed Feb 04 00:03:15 CET 2009 Signed-off-by: Ingo Molnar --- arch/x86/kernel/tsc_sync.c | 4 ++++ 1 file changed, 4 insertions(+) Index: linux-2.6-tip/arch/x86/kernel/tsc_sync.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/tsc_sync.c +++ linux-2.6-tip/arch/x86/kernel/tsc_sync.c @@ -103,6 +103,7 @@ static __cpuinit void check_tsc_warp(voi */ void __cpuinit check_tsc_sync_source(int cpu) { + unsigned long flags; int cpus = 2; /* @@ -129,8 +130,11 @@ void __cpuinit check_tsc_sync_source(int /* * Wait for the target to arrive: */ + local_save_flags(flags); + local_irq_enable(); while (atomic_read(&start_count) != cpus-1) cpu_relax(); + local_irq_restore(flags); /* * Trigger the target to continue into the measurement too: */ patches/neptune-no-at-keyboard.patch0000664000076400007640000000350311150327144016545 0ustar tglxtglxSubject: neptune: no at keyboard From: Ingo Molnar Date: Wed Feb 04 00:03:15 CET 2009 Signed-off-by: Ingo Molnar --- drivers/input/keyboard/atkbd.c | 15 +++++++++++++++ drivers/input/mouse/psmouse-base.c | 15 +++++++++++++++ 2 files changed, 30 insertions(+) Index: linux-2.6-tip/drivers/input/keyboard/atkbd.c =================================================================== --- linux-2.6-tip.orig/drivers/input/keyboard/atkbd.c +++ linux-2.6-tip/drivers/input/keyboard/atkbd.c @@ -1556,8 +1556,23 @@ static struct dmi_system_id atkbd_dmi_qu { } }; +static int __read_mostly noatkbd; + +static int __init noatkbd_setup(char *str) +{ + noatkbd = 1; + printk(KERN_INFO "debug: not setting up AT keyboard.\n"); + + return 1; +} + +__setup("noatkbd", noatkbd_setup); + static int __init atkbd_init(void) { + if (noatkbd) + return 0; + dmi_check_system(atkbd_dmi_quirk_table); return serio_register_driver(&atkbd_drv); Index: linux-2.6-tip/drivers/input/mouse/psmouse-base.c =================================================================== --- linux-2.6-tip.orig/drivers/input/mouse/psmouse-base.c +++ linux-2.6-tip/drivers/input/mouse/psmouse-base.c @@ -1645,10 +1645,25 @@ static int psmouse_get_maxproto(char *bu return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); } +static int __read_mostly nopsmouse; + +static int __init nopsmouse_setup(char *str) +{ + nopsmouse = 1; + printk(KERN_INFO "debug: not setting up psmouse.\n"); + + return 1; +} + +__setup("nopsmouse", 
nopsmouse_setup); + static int __init psmouse_init(void) { int err; + if (nopsmouse) + return 0; + kpsmoused_wq = create_singlethread_workqueue("kpsmoused"); if (!kpsmoused_wq) { printk(KERN_ERR "psmouse: failed to create kpsmoused workqueue\n"); patches/rtmutex-debug.h-cleanup.patch0000664000076400007640000000277611150327144016737 0ustar tglxtglxSubject: lock debugging: clean up rtmutex-debug.h From: Ingo Molnar style cleanups. Signed-off-by: Ingo Molnar --- kernel/rtmutex-debug.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) Index: linux-2.6-tip/kernel/rtmutex-debug.h =================================================================== --- linux-2.6-tip.orig/kernel/rtmutex-debug.h +++ linux-2.6-tip/kernel/rtmutex-debug.h @@ -17,17 +17,17 @@ extern void debug_rt_mutex_free_waiter(s extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name); extern void debug_rt_mutex_lock(struct rt_mutex *lock); extern void debug_rt_mutex_unlock(struct rt_mutex *lock); -extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, - struct task_struct *powner); +extern void +debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner); extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock); extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter, struct rt_mutex *lock); extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter); -# define debug_rt_mutex_reset_waiter(w) \ +# define debug_rt_mutex_reset_waiter(w) \ do { (w)->deadlock_lock = NULL; } while (0) -static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, - int detect) +static inline int +debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, int detect) { - return (waiter != NULL); + return waiter != NULL; } patches/netpoll-8139too-fix.patch0000664000076400007640000000141111150327144015636 0ustar tglxtglxSubject: netpoll: 8139too fix From: Ingo Molnar Date: Wed Feb 04 00:03:14 CET 2009 Signed-off-by: Ingo Molnar --- drivers/net/8139too.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) Index: linux-2.6-tip/drivers/net/8139too.c =================================================================== --- linux-2.6-tip.orig/drivers/net/8139too.c +++ linux-2.6-tip/drivers/net/8139too.c @@ -2209,7 +2209,11 @@ static irqreturn_t rtl8139_interrupt (in */ static void rtl8139_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); + /* + * use _nosync() variant - might be used by netconsole + * from atomic contexts: + */ + disable_irq_nosync(dev->irq); rtl8139_interrupt(dev->irq, dev); enable_irq(dev->irq); } patches/kprobes-preempt-fix.patch0000664000076400007640000000350611160372121016157 0ustar tglxtglxSubject: patches/kprobes-preempt-fix.patch Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) Index: linux-2.6-tip/arch/x86/kernel/kprobes.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/kprobes.c +++ linux-2.6-tip/arch/x86/kernel/kprobes.c @@ -454,7 +454,7 @@ static void __kprobes setup_singlestep(s /* Boost up -- we can execute copied instructions directly */ reset_current_kprobe(); regs->ip = (unsigned long)p->ainsn.insn; - preempt_enable_no_resched(); + preempt_enable(); return; } #endif @@ -480,7 +480,7 @@ static int __kprobes reenter_kprobe(stru arch_disarm_kprobe(p); regs->ip = (unsigned long)p->addr; reset_current_kprobe(); - preempt_enable_no_resched(); + preempt_enable(); 
break; #endif case KPROBE_HIT_ACTIVE: @@ -576,7 +576,7 @@ static int __kprobes kprobe_handler(stru } } /* else: not a kprobe fault; let the kernel handle it */ - preempt_enable_no_resched(); + preempt_enable(); return 0; } @@ -875,7 +875,7 @@ static int __kprobes post_kprobe_handler } reset_current_kprobe(); out: - preempt_enable_no_resched(); + preempt_enable(); /* * if somebody else is singlestepping across a probe point, flags @@ -909,7 +909,7 @@ int __kprobes kprobe_fault_handler(struc restore_previous_kprobe(kcb); else reset_current_kprobe(); - preempt_enable_no_resched(); + preempt_enable(); break; case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: @@ -1050,7 +1050,7 @@ int __kprobes longjmp_break_handler(stru memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp), kcb->jprobes_stack, MIN_STACK_SIZE(kcb->jprobe_saved_sp)); - preempt_enable_no_resched(); + preempt_enable(); return 1; } return 0; patches/replace-bugon-by-warn-on.patch0000664000076400007640000000136211156214150016766 0ustar tglxtglxSubject: replace: bugon by warn on From: Ingo Molnar Date: Wed Feb 04 00:03:14 CET 2009 Signed-off-by: Ingo Molnar --- arch/x86/mm/highmem_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/arch/x86/mm/highmem_32.c =================================================================== --- linux-2.6-tip.orig/arch/x86/mm/highmem_32.c +++ linux-2.6-tip/arch/x86/mm/highmem_32.c @@ -85,7 +85,7 @@ void *kmap_atomic_prot(struct page *page idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - BUG_ON(!pte_none(*(kmap_pte-idx))); + WARN_ON_ONCE(!pte_none(*(kmap_pte-idx))); set_pte(kmap_pte-idx, mk_pte(page, prot)); arch_flush_lazy_mmu_mode(); patches/i386-mark-atomic-irq-ops-raw.patch0000664000076400007640000000143411150327144017330 0ustar tglxtglxSubject: i386: mark atomic irq ops raw From: Ingo Molnar Date: Wed Feb 04 00:03:14 CET 2009 Signed-off-by: Ingo Molnar --- arch/x86/include/asm/atomic_32.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux-2.6-tip/arch/x86/include/asm/atomic_32.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/atomic_32.h +++ linux-2.6-tip/arch/x86/include/asm/atomic_32.h @@ -180,10 +180,10 @@ static inline int atomic_add_return(int #ifdef CONFIG_M386 no_xadd: /* Legacy 386 processor */ - local_irq_save(flags); + raw_local_irq_save(flags); __i = atomic_read(v); atomic_set(v, i + __i); - local_irq_restore(flags); + raw_local_irq_restore(flags); return i + __i; #endif } patches/msi-suspend-resume-workaround.patch0000664000076400007640000000115511150327757020226 0ustar tglxtglxSubject: msi: suspend resume workaround From: Ingo Molnar Date: Wed Feb 04 00:03:13 CET 2009 Signed-off-by: Ingo Molnar --- drivers/pci/msi.c | 4 ++++ 1 file changed, 4 insertions(+) Index: linux-2.6-tip/drivers/pci/msi.c =================================================================== --- linux-2.6-tip.orig/drivers/pci/msi.c +++ linux-2.6-tip/drivers/pci/msi.c @@ -323,6 +323,10 @@ static void __pci_restore_msi_state(stru return; entry = get_irq_msi(dev->irq); + if (!entry) { + WARN_ON(1); + return; + } pos = entry->msi_attrib.pos; pci_intx_for_msi(dev, 0); patches/floppy-resume-fix.patch0000664000076400007640000000410511150327757015661 0ustar tglxtglxSubject: floppy: suspend/resume fix From: Ingo Molnar introduce a floppy platform-driver and suspend/resume ops to stop/start the floppy driver. Bug reported by Mikael Pettersson. 
Signed-off-by: Ingo Molnar --- drivers/block/floppy.c | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) Index: linux-2.6-tip/drivers/block/floppy.c =================================================================== --- linux-2.6-tip.orig/drivers/block/floppy.c +++ linux-2.6-tip/drivers/block/floppy.c @@ -4148,6 +4148,28 @@ static void floppy_device_release(struct { } +static int floppy_suspend(struct platform_device *dev, pm_message_t state) +{ + floppy_release_irq_and_dma(); + + return 0; +} + +static int floppy_resume(struct platform_device *dev) +{ + floppy_grab_irq_and_dma(); + + return 0; +} + +static struct platform_driver floppy_driver = { + .suspend = floppy_suspend, + .resume = floppy_resume, + .driver = { + .name = "floppy", + }, +}; + static struct platform_device floppy_device[N_DRIVE]; static struct kobject *floppy_find(dev_t dev, int *part, void *data) @@ -4196,10 +4218,14 @@ static int __init floppy_init(void) if (err) goto out_put_disk; + err = platform_driver_register(&floppy_driver); + if (err) + goto out_unreg_blkdev; + floppy_queue = blk_init_queue(do_fd_request, &floppy_lock); if (!floppy_queue) { err = -ENOMEM; - goto out_unreg_blkdev; + goto out_unreg_driver; } blk_queue_max_sectors(floppy_queue, 64); @@ -4346,6 +4372,8 @@ out_flush_work: out_unreg_region: blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); blk_cleanup_queue(floppy_queue); +out_unreg_driver: + platform_driver_unregister(&floppy_driver); out_unreg_blkdev: unregister_blkdev(FLOPPY_MAJOR, "fd"); out_put_disk: @@ -4567,6 +4595,7 @@ static void __exit floppy_module_exit(vo blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); unregister_blkdev(FLOPPY_MAJOR, "fd"); + platform_driver_unregister(&floppy_driver); for (drive = 0; drive < N_DRIVE; drive++) { del_timer_sync(&motor_off_timer[drive]); patches/ioapic-fix-too-fast-clocks.patch0000664000076400007640000000306011160372121017305 0ustar tglxtglxSubject: patches/ioapic-fix-too-fast-clocks.patch From: Akira Tsukamoto This one line patch adds upper bound testing inside timer_irq_works() when evaluating whether irq timer works or not on boot up. It fix the machines having problem with clock running too fast. What this patch do is, if timer interrupts running too fast through IO-APIC IRQ then false back to i8259A IRQ. I really appreciate for the feedback from ATI Xpress 200 chipset user, It should eliminate the needs of adding no_timer_check on kernel options. I have NEC laptop using ATI Xpress 200 chipset with Pentium M 1.8GHz and its clock keep going forward when kernel compiled with local APIC support. Many machines based on RS200 chipset seem to have the same problem, including Acer Ferrari 400X AMD notebook or Compaq R4000. Also I would like to have comments on upper bound limit, 16 ticks, which I chose in this patch. My laptop always reports around 20, which is double from normal. arch/x86/kernel/apic/io_apic.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) Signed-off-by: Ingo Molnar Index: linux-2.6-tip/arch/x86/kernel/apic/io_apic.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/apic/io_apic.c +++ linux-2.6-tip/arch/x86/kernel/apic/io_apic.c @@ -2251,8 +2251,10 @@ static int __init timer_irq_works(void) */ /* jiffies wrap? 
*/ - if (time_after(jiffies, t1 + 4)) + if (time_after(jiffies, t1 + 4) && + time_before(jiffies, t1 + 16)) return 1; + return 0; } patches/move-native-irq.patch0000664000076400007640000000212311150327144015275 0ustar tglxtglxSubject: move: native irq From: Ingo Molnar Date: Wed Feb 04 00:03:12 CET 2009 Signed-off-by: Ingo Molnar --- kernel/irq/migration.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) Index: linux-2.6-tip/kernel/irq/migration.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/migration.c +++ linux-2.6-tip/kernel/irq/migration.c @@ -50,6 +50,7 @@ void move_masked_irq(int irq) void move_native_irq(int irq) { struct irq_desc *desc = irq_to_desc(irq); + int mask = 1; if (likely(!(desc->status & IRQ_MOVE_PENDING))) return; @@ -57,8 +58,17 @@ void move_native_irq(int irq) if (unlikely(desc->status & IRQ_DISABLED)) return; - desc->chip->mask(irq); + /* + * If the irq is already in progress, it should be masked. + * If we unmask it, we might cause an interrupt storm on RT. + */ + if (unlikely(desc->status & IRQ_INPROGRESS)) + mask = 0; + + if (mask) + desc->chip->mask(irq); move_masked_irq(irq); - desc->chip->unmask(irq); + if (mask) + desc->chip->unmask(irq); } patches/dont-unmask-io_apic.patch0000664000076400007640000000151211160372120016107 0ustar tglxtglxSubject: dont: unmask io_apic From: Ingo Molnar Date: Wed Feb 04 00:03:12 CET 2009 Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/io_apic.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) Index: linux-2.6-tip/arch/x86/kernel/apic/io_apic.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/apic/io_apic.c +++ linux-2.6-tip/arch/x86/kernel/apic/io_apic.c @@ -2569,7 +2569,8 @@ static void ack_apic_level(unsigned int irq_complete_move(&desc); #ifdef CONFIG_GENERIC_PENDING_IRQ /* If we are moving the irq we need to mask it */ - if (unlikely(desc->status & IRQ_MOVE_PENDING)) { + if (unlikely(desc->status & IRQ_MOVE_PENDING) && + !(desc->status & IRQ_INPROGRESS)) { do_unmask_irq = 1; mask_IO_APIC_irq_desc(desc); } patches/gcc-warnings-shut-up.patch0000664000076400007640000000122311150327144016237 0ustar tglxtglxSubject: patches/gcc-warnings-shut-up.patch Signed-off-by: Ingo Molnar --- net/core/flow.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/net/core/flow.c =================================================================== --- linux-2.6-tip.orig/net/core/flow.c +++ linux-2.6-tip/net/core/flow.c @@ -168,7 +168,7 @@ static int flow_key_compare(struct flowi void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, flow_resolve_t resolver) { - struct flow_cache_entry *fle, **head; + struct flow_cache_entry *fle, **head = NULL /* shut up GCC */; unsigned int hash; int cpu; patches/nfs-stats-miss-preemption.patch0000664000076400007640000000227111150327144017331 0ustar tglxtglxSubject: nfs: fix missing preemption check From: Thomas Gleixner Date: Sun, 27 Jul 2008 00:54:19 +0200 NFS iostats use get_cpu()/put_cpu_no_preempt(). That misses a preemption check for no good reason and introduces long latencies when a wakeup of a higher priority task happens in the preempt disabled region. 
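The one-line change below works because put_cpu() ends in preempt_enable(), which (with kernel preemption configured) re-checks need_resched() and switches to a freshly woken higher-priority task right away, whereas put_cpu_no_resched() re-enables preemption without that check and defers the switch to the next scheduling point. A minimal sketch of the intended usage pattern, with a hypothetical per-CPU counter rather than the NFS iostats code:

	/* Sketch only -- hypothetical counter, not part of the patch below. */
	#include <linux/percpu.h>
	#include <linux/smp.h>

	static DEFINE_PER_CPU(unsigned long, foo_events);

	static void foo_count_event(void)
	{
		int cpu = get_cpu();		/* disables preemption */

		per_cpu(foo_events, cpu)++;	/* safe: no migration possible here */
		put_cpu();			/* preempt_enable(): honours a pending
						 * higher-priority wakeup immediately */
	}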
Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- fs/nfs/iostat.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux-2.6-tip/fs/nfs/iostat.h =================================================================== --- linux-2.6-tip.orig/fs/nfs/iostat.h +++ linux-2.6-tip/fs/nfs/iostat.h @@ -28,7 +28,7 @@ static inline void nfs_inc_server_stats( cpu = get_cpu(); iostats = per_cpu_ptr(server->io_stats, cpu); iostats->events[stat]++; - put_cpu_no_resched(); + put_cpu(); } static inline void nfs_inc_stats(const struct inode *inode, @@ -47,7 +47,7 @@ static inline void nfs_add_server_stats( cpu = get_cpu(); iostats = per_cpu_ptr(server->io_stats, cpu); iostats->bytes[stat] += addend; - put_cpu_no_resched(); + put_cpu(); } static inline void nfs_add_stats(const struct inode *inode, patches/random-driver-latency-fix.patch0000664000076400007640000000203311150327144017244 0ustar tglxtglxSubject: patches/random-driver-latency-fix.patch Signed-off-by: Ingo Molnar --- drivers/char/random.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) Index: linux-2.6-tip/drivers/char/random.c =================================================================== --- linux-2.6-tip.orig/drivers/char/random.c +++ linux-2.6-tip/drivers/char/random.c @@ -623,8 +623,11 @@ static void add_timer_randomness(struct preempt_disable(); /* if over the trickle threshold, use only 1 in 4096 samples */ if (input_pool.entropy_count > trickle_thresh && - (__get_cpu_var(trickle_count)++ & 0xfff)) - goto out; + (__get_cpu_var(trickle_count)++ & 0xfff)) { + preempt_enable(); + return; + } + preempt_enable(); sample.jiffies = jiffies; sample.cycles = get_cycles(); @@ -666,8 +669,6 @@ static void add_timer_randomness(struct credit_entropy_bits(&input_pool, min_t(int, fls(delta>>1), 11)); } -out: - preempt_enable(); } void add_input_randomness(unsigned int type, unsigned int code, patches/loopback-revert.patch0000664000076400007640000000240511150327144015354 0ustar tglxtglxSubject: patches/loopback-revert.patch revert this commit: commit 58f539740b1ccfc5ef4e509ec2efe82621b546e3 Author: Eric Dumazet Date: Fri Oct 20 00:32:41 2006 -0700 [NET]: Can use __get_cpu_var() instead of per_cpu() in loopback driver. As BHs are off in loopback_xmit(), preemption cannot occurs, so we can use __get_cpu_var() instead of per_cpu() (and avoid a preempt_enable()/preempt_disable() pair) Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller Signed-off-by: Ingo Molnar --- drivers/net/loopback.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux-2.6-tip/drivers/net/loopback.c =================================================================== --- linux-2.6-tip.orig/drivers/net/loopback.c +++ linux-2.6-tip/drivers/net/loopback.c @@ -76,11 +76,11 @@ static int loopback_xmit(struct sk_buff skb->protocol = eth_type_trans(skb,dev); - /* it's OK to use per_cpu_ptr() because BHs are off */ pcpu_lstats = dev->ml_priv; - lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id()); + lb_stats = per_cpu_ptr(pcpu_lstats, get_cpu()); lb_stats->bytes += skb->len; lb_stats->packets++; + put_cpu(); netif_rx(skb); patches/generic-cmpxchg-use-raw-local-irq-variant.patch0000664000076400007640000000233411150327144022225 0ustar tglxtglxSubject: generic: cmpxchg use raw local irq variant From: Ingo Molnar Date: Wed Feb 04 00:03:10 CET 2009 Signed-off-by: Ingo Molnar --- include/asm-generic/cmpxchg-local.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) Index: linux-2.6-tip/include/asm-generic/cmpxchg-local.h =================================================================== --- linux-2.6-tip.orig/include/asm-generic/cmpxchg-local.h +++ linux-2.6-tip/include/asm-generic/cmpxchg-local.h @@ -20,7 +20,7 @@ static inline unsigned long __cmpxchg_lo if (size == 8 && sizeof(unsigned long) != 8) wrong_size_cmpxchg(ptr); - local_irq_save(flags); + raw_local_irq_save(flags); switch (size) { case 1: prev = *(u8 *)ptr; if (prev == old) @@ -41,7 +41,7 @@ static inline unsigned long __cmpxchg_lo default: wrong_size_cmpxchg(ptr); } - local_irq_restore(flags); + raw_local_irq_restore(flags); return prev; } @@ -54,11 +54,11 @@ static inline u64 __cmpxchg64_local_gene u64 prev; unsigned long flags; - local_irq_save(flags); + raw_local_irq_save(flags); prev = *(u64 *)ptr; if (prev == old) *(u64 *)ptr = new; - local_irq_restore(flags); + raw_local_irq_restore(flags); return prev; } patches/preempt-softirqs-core.patch0000664000076400007640000004156611160544613016545 0ustar tglxtglxSubject: preempt: softirqs core From: Ingo Molnar Date: Wed Feb 04 00:03:10 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/interrupt.h | 13 ++ include/linux/sched.h | 15 ++ kernel/Kconfig.preempt | 16 ++ kernel/sched.c | 28 ++++- kernel/softirq.c | 257 +++++++++++++++++++++++++++++++++++++--------- 5 files changed, 276 insertions(+), 53 deletions(-) Index: linux-2.6-tip/include/linux/interrupt.h =================================================================== --- linux-2.6-tip.orig/include/linux/interrupt.h +++ linux-2.6-tip/include/linux/interrupt.h @@ -265,6 +265,8 @@ enum SCHED_SOFTIRQ, HRTIMER_SOFTIRQ, RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ + /* Entries after this are ignored in split softirq mode */ + MAX_SOFTIRQ, NR_SOFTIRQS }; @@ -283,13 +285,16 @@ struct softirq_action void (*action)(struct softirq_action *); }; +#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) +#define __do_raise_softirq_irqoff(nr) __raise_softirq_irqoff(nr) + asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); -#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); +extern void wakeup_irqd(void); /* This is the worklist that queues up per-cpu softirq work. 
* @@ -299,6 +304,11 @@ extern void raise_softirq(unsigned int n * only be accessed by the local cpu that they are for. */ DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list); +#ifdef CONFIG_PREEMPT_SOFTIRQS +extern void wait_for_softirq(int softirq); +#else +# define wait_for_softirq(x) do {} while(0) +#endif /* Try to send a softirq to a remote cpu. If this cannot be done, the * work will be queued to the local cpu. @@ -435,6 +445,7 @@ extern void tasklet_kill(struct tasklet_ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); +void takeover_tasklets(unsigned int cpu); /* * Autoprobing for irqs: Index: linux-2.6-tip/include/linux/sched.h =================================================================== --- linux-2.6-tip.orig/include/linux/sched.h +++ linux-2.6-tip/include/linux/sched.h @@ -92,6 +92,12 @@ struct sched_param { #include +#ifdef CONFIG_PREEMPT_SOFTIRQS +extern int softirq_preemption; +#else +# define softirq_preemption 0 +#endif + struct mem_cgroup; struct exec_domain; struct futex_pi_state; @@ -1642,6 +1648,7 @@ extern cputime_t task_gtime(struct task_ #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ #define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ +#define PF_SOFTIRQ 0x08000000 /* softirq context */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ @@ -2229,6 +2236,7 @@ static inline int cond_resched_bkl(void) { return _cond_resched(); } +extern int cond_resched_softirq_context(void); /* * Does a critical section need to be broken due to another @@ -2261,6 +2269,13 @@ static inline void thread_group_cputime_ { } +static inline int softirq_need_resched(void) +{ + if (softirq_preemption && (current->flags & PF_SOFTIRQ)) + return need_resched(); + return 0; +} + /* * Reevaluate whether the task has signals pending delivery. * Wake the task if so. Index: linux-2.6-tip/kernel/Kconfig.preempt =================================================================== --- linux-2.6-tip.orig/kernel/Kconfig.preempt +++ linux-2.6-tip/kernel/Kconfig.preempt @@ -52,3 +52,19 @@ config PREEMPT endchoice +config PREEMPT_SOFTIRQS + bool "Thread Softirqs" + default n +# depends on PREEMPT + help + This option reduces the latency of the kernel by 'threading' + soft interrupts. This means that all softirqs will execute + in softirqd's context. While this helps latency, it can also + reduce performance. + + The threading of softirqs can also be controlled via + /proc/sys/kernel/softirq_preemption runtime flag and the + sofirq-preempt=0/1 boot-time option. + + Say N if you are unsure. 
+ Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -4445,7 +4445,7 @@ void account_system_time(struct task_str tmp = cputime_to_cputime64(cputime); if (hardirq_count() - hardirq_offset) cpustat->irq = cputime64_add(cpustat->irq, tmp); - else if (softirq_count()) + else if (softirq_count() || (p->flags & PF_SOFTIRQ)) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); else cpustat->system = cputime64_add(cpustat->system, tmp); @@ -4966,7 +4966,7 @@ asmlinkage void __sched preempt_schedule int saved_lock_depth; /* Catch callers which need to be fixed */ - BUG_ON(ti->preempt_count || !irqs_disabled()); + WARN_ON_ONCE(ti->preempt_count || !irqs_disabled()); do { add_preempt_count(PREEMPT_ACTIVE); @@ -6074,9 +6074,12 @@ int cond_resched_lock(spinlock_t *lock) } EXPORT_SYMBOL(cond_resched_lock); +/* + * Voluntarily preempt a process context that has softirqs disabled: + */ int __sched cond_resched_softirq(void) { - BUG_ON(!in_softirq()); + WARN_ON_ONCE(!in_softirq()); if (need_resched() && system_state == SYSTEM_RUNNING) { local_bh_enable(); @@ -6088,6 +6091,25 @@ int __sched cond_resched_softirq(void) } EXPORT_SYMBOL(cond_resched_softirq); +/* + * Voluntarily preempt a softirq context (possible with softirq threading): + */ +int __sched cond_resched_softirq_context(void) +{ + WARN_ON_ONCE(!in_softirq()); + + if (softirq_need_resched() && system_state == SYSTEM_RUNNING) { + raw_local_irq_disable(); + _local_bh_enable(); + raw_local_irq_enable(); + __cond_resched(); + local_bh_disable(); + return 1; + } + return 0; +} +EXPORT_SYMBOL(cond_resched_softirq_context); + /** * yield - yield the current processor to other threads. * Index: linux-2.6-tip/kernel/softirq.c =================================================================== --- linux-2.6-tip.orig/kernel/softirq.c +++ linux-2.6-tip/kernel/softirq.c @@ -8,9 +8,15 @@ * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903) * * Remote softirq infrastructure is by Jens Axboe. + * + * Softirq-split implemetation by + * Copyright (C) 2005 Thomas Gleixner, Ingo Molnar */ #include +#include +#include +#include #include #include #include @@ -52,7 +58,43 @@ EXPORT_SYMBOL(irq_stat); static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; -static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); +struct softirqdata { + int nr; + unsigned long cpu; + struct task_struct *tsk; +#ifdef CONFIG_PREEMPT_SOFTIRQS + wait_queue_head_t wait; + int running; +#endif +}; + +static DEFINE_PER_CPU(struct softirqdata [MAX_SOFTIRQ], ksoftirqd); + +#ifdef CONFIG_PREEMPT_SOFTIRQS +/* + * Preempting the softirq causes cases that would not be a + * problem when the softirq is not preempted. That is a + * process may have code to spin while waiting for a softirq + * to finish on another CPU. But if it happens that the + * process has preempted the softirq, this could cause a + * deadlock. 
+ */ +void wait_for_softirq(int softirq) +{ + struct softirqdata *data = &__get_cpu_var(ksoftirqd)[softirq]; + + if (data->running) { + DECLARE_WAITQUEUE(wait, current); + + set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&data->wait, &wait); + if (data->running) + schedule(); + remove_wait_queue(&data->wait, &wait); + __set_current_state(TASK_RUNNING); + } +} +#endif char *softirq_to_name[NR_SOFTIRQS] = { "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", @@ -65,16 +107,32 @@ char *softirq_to_name[NR_SOFTIRQS] = { * to the pending events, so lets the scheduler to balance * the softirq load for us. */ -static inline void wakeup_softirqd(void) +static void wakeup_softirqd(int softirq) { /* Interrupts are disabled: no need to stop preemption */ - struct task_struct *tsk = __get_cpu_var(ksoftirqd); + struct task_struct *tsk = __get_cpu_var(ksoftirqd)[softirq].tsk; if (tsk && tsk->state != TASK_RUNNING) wake_up_process(tsk); } /* + * Wake up the softirq threads which have work + */ +static void trigger_softirqs(void) +{ + u32 pending = local_softirq_pending(); + int curr = 0; + + while (pending) { + if (pending & 1) + wakeup_softirqd(curr); + pending >>= 1; + curr++; + } +} + +/* * This one is for softirq.c-internal use, * where hardirqs are disabled legitimately: */ @@ -189,7 +247,7 @@ EXPORT_SYMBOL(local_bh_enable_ip); DEFINE_TRACE(softirq_entry); DEFINE_TRACE(softirq_exit); -asmlinkage void __do_softirq(void) +asmlinkage void ___do_softirq(void) { struct softirq_action *h; __u32 pending; @@ -199,9 +257,6 @@ asmlinkage void __do_softirq(void) pending = local_softirq_pending(); account_system_vtime(current); - __local_bh_disable((unsigned long)__builtin_return_address(0)); - lockdep_softirq_enter(); - cpu = smp_processor_id(); restart: /* Reset the pending bitmask before enabling irqs */ @@ -228,6 +283,7 @@ restart: } rcu_bh_qsctr_inc(cpu); + cond_resched_softirq_context(); } h++; pending >>= 1; @@ -240,12 +296,34 @@ restart: goto restart; if (pending) - wakeup_softirqd(); + trigger_softirqs(); +} + +asmlinkage void __do_softirq(void) +{ +#ifdef CONFIG_PREEMPT_SOFTIRQS + /* + * 'preempt harder'. Push all softirq processing off to ksoftirqd. + */ + if (softirq_preemption) { + if (local_softirq_pending()) + trigger_softirqs(); + return; + } +#endif + /* + * 'immediate' softirq execution: + */ + __local_bh_disable((unsigned long)__builtin_return_address(0)); + lockdep_softirq_enter(); + + ___do_softirq(); lockdep_softirq_exit(); account_system_vtime(current); _local_bh_enable(); + } #ifndef __ARCH_HAS_DO_SOFTIRQ @@ -316,19 +394,11 @@ void irq_exit(void) */ inline void raise_softirq_irqoff(unsigned int nr) { - __raise_softirq_irqoff(nr); + __do_raise_softirq_irqoff(nr); - /* - * If we're in an interrupt or softirq, we're done - * (this also catches softirq-disabled code). We will - * actually run the softirq once we return from - * the irq or softirq. - * - * Otherwise we wake up ksoftirqd to make sure we - * schedule the softirq soon. 
- */ - if (!in_interrupt()) - wakeup_softirqd(); +#ifdef CONFIG_PREEMPT_SOFTIRQS + wakeup_softirqd(nr); +#endif } void raise_softirq(unsigned int nr) @@ -424,7 +494,7 @@ static void tasklet_action(struct softir t->next = NULL; *__get_cpu_var(tasklet_vec).tail = t; __get_cpu_var(tasklet_vec).tail = &(t->next); - __raise_softirq_irqoff(TASKLET_SOFTIRQ); + __do_raise_softirq_irqoff(TASKLET_SOFTIRQ); local_irq_enable(); } } @@ -459,7 +529,7 @@ static void tasklet_hi_action(struct sof t->next = NULL; *__get_cpu_var(tasklet_hi_vec).tail = t; __get_cpu_var(tasklet_hi_vec).tail = &(t->next); - __raise_softirq_irqoff(HI_SOFTIRQ); + __do_raise_softirq_irqoff(HI_SOFTIRQ); local_irq_enable(); } } @@ -635,13 +705,24 @@ void __init softirq_init(void) open_softirq(HI_SOFTIRQ, tasklet_hi_action); } -static int ksoftirqd(void * __bind_cpu) +static int ksoftirqd(void * __data) { + struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2 }; + struct softirqdata *data = __data; + u32 mask = (1 << data->nr); + struct softirq_action *h; + +#ifdef CONFIG_PREEMPT_SOFTIRQS + init_waitqueue_head(&data->wait); +#endif + + sys_sched_setscheduler(current->pid, SCHED_FIFO, ¶m); + current->flags |= PF_SOFTIRQ; set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { preempt_disable(); - if (!local_softirq_pending()) { + if (!(local_softirq_pending() & mask)) { preempt_enable_no_resched(); schedule(); preempt_disable(); @@ -649,20 +730,42 @@ static int ksoftirqd(void * __bind_cpu) __set_current_state(TASK_RUNNING); - while (local_softirq_pending()) { +#ifdef CONFIG_PREEMPT_SOFTIRQS + data->running = 1; +#endif + + while (local_softirq_pending() & mask) { /* Preempt disable stops cpu going offline. If already offline, we'll be on wrong CPU: don't process */ - if (cpu_is_offline((long)__bind_cpu)) + if (cpu_is_offline(data->cpu)) goto wait_to_die; - do_softirq(); + + local_irq_disable(); preempt_enable_no_resched(); + set_softirq_pending(local_softirq_pending() & ~mask); + local_bh_disable(); + local_irq_enable(); + + h = &softirq_vec[data->nr]; + if (h) + h->action(h); + rcu_bh_qsctr_inc(data->cpu); + + local_irq_disable(); + _local_bh_enable(); + local_irq_enable(); + cond_resched(); preempt_disable(); - rcu_qsctr_inc((long)__bind_cpu); + rcu_qsctr_inc(data->cpu); } preempt_enable(); set_current_state(TASK_INTERRUPTIBLE); +#ifdef CONFIG_PREEMPT_SOFTIRQS + data->running = 0; + wake_up(&data->wait); +#endif } __set_current_state(TASK_RUNNING); return 0; @@ -712,7 +815,7 @@ void tasklet_kill_immediate(struct taskl BUG(); } -static void takeover_tasklets(unsigned int cpu) +void takeover_tasklets(unsigned int cpu) { /* CPU is dead, so no lock needed. 
*/ local_irq_disable(); @@ -738,49 +841,82 @@ static void takeover_tasklets(unsigned i } #endif /* CONFIG_HOTPLUG_CPU */ +static const char *softirq_names [] = +{ + [HI_SOFTIRQ] = "high", + [SCHED_SOFTIRQ] = "sched", + [TIMER_SOFTIRQ] = "timer", + [NET_TX_SOFTIRQ] = "net-tx", + [NET_RX_SOFTIRQ] = "net-rx", + [BLOCK_SOFTIRQ] = "block", + [TASKLET_SOFTIRQ] = "tasklet", +#ifdef CONFIG_HIGH_RES_TIMERS + [HRTIMER_SOFTIRQ] = "hrtimer", +#endif + [RCU_SOFTIRQ] = "rcu", +}; + static int __cpuinit cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { - int hotcpu = (unsigned long)hcpu; + int hotcpu = (unsigned long)hcpu, i; struct task_struct *p; switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu); - if (IS_ERR(p)) { - printk("ksoftirqd for %i failed\n", hotcpu); - return NOTIFY_BAD; - } - kthread_bind(p, hotcpu); - per_cpu(ksoftirqd, hotcpu) = p; - break; + for (i = 0; i < MAX_SOFTIRQ; i++) { + per_cpu(ksoftirqd, hotcpu)[i].nr = i; + per_cpu(ksoftirqd, hotcpu)[i].cpu = hotcpu; + per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL; + } + for (i = 0; i < MAX_SOFTIRQ; i++) { + p = kthread_create(ksoftirqd, + &per_cpu(ksoftirqd, hotcpu)[i], + "softirq-%s/%d", softirq_names[i], + hotcpu); + if (IS_ERR(p)) { + printk("ksoftirqd %d for %i failed\n", i, + hotcpu); + return NOTIFY_BAD; + } + kthread_bind(p, hotcpu); + per_cpu(ksoftirqd, hotcpu)[i].tsk = p; + } + break; + break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: - wake_up_process(per_cpu(ksoftirqd, hotcpu)); + for (i = 0; i < MAX_SOFTIRQ; i++) + wake_up_process(per_cpu(ksoftirqd, hotcpu)[i].tsk); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: - if (!per_cpu(ksoftirqd, hotcpu)) - break; - /* Unbind so it can run. Fall thru. 
*/ - kthread_bind(per_cpu(ksoftirqd, hotcpu), - cpumask_any(cpu_online_mask)); +#if 0 + for (i = 0; i < MAX_SOFTIRQ; i++) { + if (!per_cpu(ksoftirqd, hotcpu)[i].tsk) + continue; + kthread_bind(per_cpu(ksoftirqd, hotcpu)[i].tsk, + cpumask_any(cpu_online_mask)); + } +#endif case CPU_DEAD: case CPU_DEAD_FROZEN: { struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - p = per_cpu(ksoftirqd, hotcpu); - per_cpu(ksoftirqd, hotcpu) = NULL; - sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); - kthread_stop(p); + for (i = 0; i < MAX_SOFTIRQ; i++) { + p = per_cpu(ksoftirqd, hotcpu)[i].tsk; + per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL; + sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); + kthread_stop(p); + } takeover_tasklets(hotcpu); break; } #endif /* CONFIG_HOTPLUG_CPU */ - } + } return NOTIFY_OK; } @@ -800,6 +936,29 @@ static __init int spawn_ksoftirqd(void) } early_initcall(spawn_ksoftirqd); + +#ifdef CONFIG_PREEMPT_SOFTIRQS + +int softirq_preemption = 1; + +EXPORT_SYMBOL(softirq_preemption); + +static int __init softirq_preempt_setup (char *str) +{ + if (!strncmp(str, "off", 3)) + softirq_preemption = 0; + else + get_option(&str, &softirq_preemption); + if (!softirq_preemption) + printk("turning off softirq preemption!\n"); + + return 1; +} + +__setup("softirq-preempt=", softirq_preempt_setup); + +#endif + #ifdef CONFIG_SMP /* * Call a function on all processors patches/preempt-irqs-core.patch0000664000076400007640000006567411160544613015657 0ustar tglxtglxSubject: preempt: irqs core From: Ingo Molnar Date: Wed Feb 04 00:03:09 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/interrupt.h | 16 +- include/linux/irq.h | 26 +++- include/linux/sched.h | 15 ++ init/main.c | 5 kernel/irq/autoprobe.c | 1 kernel/irq/chip.c | 36 +++++ kernel/irq/handle.c | 44 ++++++ kernel/irq/internals.h | 4 kernel/irq/manage.c | 293 +++++++++++++++++++++++++++++++++++++++++++++- kernel/irq/proc.c | 131 ++++++++++++++------ kernel/irq/spurious.c | 12 + kernel/sched.c | 23 +++ 12 files changed, 549 insertions(+), 57 deletions(-) Index: linux-2.6-tip/include/linux/interrupt.h =================================================================== --- linux-2.6-tip.orig/include/linux/interrupt.h +++ linux-2.6-tip/include/linux/interrupt.h @@ -54,10 +54,12 @@ #define IRQF_SAMPLE_RANDOM 0x00000040 #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 -#define IRQF_TIMER 0x00000200 +#define __IRQF_TIMER 0x00000200 #define IRQF_PERCPU 0x00000400 #define IRQF_NOBALANCING 0x00000800 #define IRQF_IRQPOLL 0x00001000 +#define IRQF_NODELAY 0x00002000 +#define IRQF_TIMER (__IRQF_TIMER | IRQF_NODELAY) typedef irqreturn_t (*irq_handler_t)(int, void *); @@ -80,7 +82,7 @@ struct irqaction { void *dev_id; struct irqaction *next; int irq; - struct proc_dir_entry *dir; + struct proc_dir_entry *dir, *threaded; }; extern irqreturn_t no_action(int cpl, void *dev_id); @@ -235,6 +237,7 @@ static inline int disable_irq_wake(unsig #ifndef __ARCH_SET_SOFTIRQ_PENDING #define set_softirq_pending(x) (local_softirq_pending() = (x)) +// FIXME: PREEMPT_RT: set_bit()? 
#define or_softirq_pending(x) (local_softirq_pending() |= (x)) #endif @@ -285,8 +288,13 @@ struct softirq_action void (*action)(struct softirq_action *); }; -#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) -#define __do_raise_softirq_irqoff(nr) __raise_softirq_irqoff(nr) +#ifdef CONFIG_PREEMPT_HARDIRQS +# define __raise_softirq_irqoff(nr) raise_softirq_irqoff(nr) +# define __do_raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) +#else +# define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) +# define __do_raise_softirq_irqoff(nr) __raise_softirq_irqoff(nr) +#endif asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); Index: linux-2.6-tip/include/linux/irq.h =================================================================== --- linux-2.6-tip.orig/include/linux/irq.h +++ linux-2.6-tip/include/linux/irq.h @@ -20,10 +20,12 @@ #include #include #include +#include #include #include #include +#include struct irq_desc; typedef void (*irq_flow_handler_t)(unsigned int irq, @@ -65,6 +67,7 @@ typedef void (*irq_flow_handler_t)(unsig #define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ #define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ #define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/ +#define IRQ_NODELAY 0x40000000 /* IRQ must run immediately */ #ifdef CONFIG_IRQ_PER_CPU # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) @@ -151,6 +154,9 @@ struct irq_2_iommu; * @irq_count: stats field to detect stalled irqs * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts + * @thread: Thread pointer for threaded preemptible irq handling + * @wait_for_handler: Waitqueue to wait for a running preemptible handler + * @cycles: Timestamp for stats and debugging * @lock: locking for SMP * @affinity: IRQ affinity on SMP * @cpu: cpu index useful for balancing @@ -178,6 +184,9 @@ struct irq_desc { unsigned int irq_count; /* For detecting broken IRQs */ unsigned long last_unhandled; /* Aging timer for unhandled count */ unsigned int irqs_unhandled; + struct task_struct *thread; + wait_queue_head_t wait_for_handler; + cycles_t timestamp; spinlock_t lock; #ifdef CONFIG_SMP cpumask_var_t affinity; @@ -410,7 +419,22 @@ extern int set_irq_msi(unsigned int irq, #define get_irq_desc_data(desc) ((desc)->handler_data) #define get_irq_desc_msi(desc) ((desc)->msi_desc) -#endif /* CONFIG_GENERIC_HARDIRQS */ +/* Early initialization of irqs */ +extern void early_init_hardirqs(void); +extern cycles_t irq_timestamp(unsigned int irq); + +#if defined(CONFIG_PREEMPT_HARDIRQS) +extern void init_hardirqs(void); +#else +static inline void init_hardirqs(void) { } +#endif + +#else /* end GENERIC HARDIRQS */ + +static inline void early_init_hardirqs(void) { } +static inline void init_hardirqs(void) { } + +#endif /* !CONFIG_GENERIC_HARDIRQS */ #endif /* !CONFIG_S390 */ Index: linux-2.6-tip/include/linux/sched.h =================================================================== --- linux-2.6-tip.orig/include/linux/sched.h +++ linux-2.6-tip/include/linux/sched.h @@ -98,6 +98,12 @@ extern int softirq_preemption; # define softirq_preemption 0 #endif +#ifdef CONFIG_PREEMPT_HARDIRQS +extern int hardirq_preemption; +#else +# define hardirq_preemption 0 +#endif + struct mem_cgroup; struct exec_domain; struct futex_pi_state; @@ -1629,6 +1635,7 @@ extern cputime_t task_gtime(struct task_ #define 
PF_EXITING 0x00000004 /* getting shut down */ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +#define PF_HARDIRQ 0x08000020 /* hardirq context */ #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ #define PF_DUMPCORE 0x00000200 /* dumped core */ @@ -2237,6 +2244,7 @@ static inline int cond_resched_bkl(void) return _cond_resched(); } extern int cond_resched_softirq_context(void); +extern int cond_resched_hardirq_context(void); /* * Does a critical section need to be broken due to another @@ -2276,6 +2284,13 @@ static inline int softirq_need_resched(v return 0; } +static inline int hardirq_need_resched(void) +{ + if (hardirq_preemption && (current->flags & PF_HARDIRQ)) + return need_resched(); + return 0; +} + /* * Reevaluate whether the task has signals pending delivery. * Wake the task if so. Index: linux-2.6-tip/init/main.c =================================================================== --- linux-2.6-tip.orig/init/main.c +++ linux-2.6-tip/init/main.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -583,8 +584,10 @@ asmlinkage void __init start_kernel(void * fragile until we cpu_idle() for the first time. */ preempt_disable(); + build_all_zonelists(); page_alloc_init(); + early_init_hardirqs(); printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line); parse_early_param(); parse_args("Booting kernel", static_command_line, __start___param, @@ -863,6 +866,8 @@ static int __init kernel_init(void * unu smp_prepare_cpus(setup_max_cpus); + init_hardirqs(); + do_pre_smp_initcalls(); start_boot_trace(); Index: linux-2.6-tip/kernel/irq/autoprobe.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/autoprobe.c +++ linux-2.6-tip/kernel/irq/autoprobe.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include Index: linux-2.6-tip/kernel/irq/chip.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/chip.c +++ linux-2.6-tip/kernel/irq/chip.c @@ -293,7 +293,9 @@ static inline void mask_ack_irq(struct i if (desc->chip->mask_ack) desc->chip->mask_ack(irq); else { - desc->chip->mask(irq); + if (desc->chip->ack) + if (desc->chip->mask) + desc->chip->mask(irq); if (desc->chip->ack) desc->chip->ack(irq); } @@ -319,8 +321,10 @@ handle_simple_irq(unsigned int irq, stru spin_lock(&desc->lock); - if (unlikely(desc->status & IRQ_INPROGRESS)) + if (unlikely(desc->status & IRQ_INPROGRESS)) { + desc->status |= IRQ_PENDING; goto out_unlock; + } desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); kstat_incr_irqs_this_cpu(irq, desc); @@ -329,6 +333,11 @@ handle_simple_irq(unsigned int irq, stru goto out_unlock; desc->status |= IRQ_INPROGRESS; + /* + * hardirq redirection to the irqd process context: + */ + if (redirect_hardirq(desc)) + goto out_unlock; spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); @@ -375,6 +384,13 @@ handle_level_irq(unsigned int irq, struc goto out_unlock; desc->status |= IRQ_INPROGRESS; + + /* + * hardirq redirection to the irqd process context: + */ + if (redirect_hardirq(desc)) + goto out_unlock; + spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); @@ -427,6 +443,15 @@ handle_fasteoi_irq(unsigned int irq, str } desc->status |= IRQ_INPROGRESS; + /* + * In the threaded case we fall back to a mask+eoi sequence: + */ + if (redirect_hardirq(desc)) { + if 
(desc->chip->mask) + desc->chip->mask(irq); + goto out; + } + desc->status &= ~IRQ_PENDING; spin_unlock(&desc->lock); @@ -439,7 +464,6 @@ handle_fasteoi_irq(unsigned int irq, str out: desc->chip->eoi(irq); desc = irq_remap_to_desc(irq, desc); - spin_unlock(&desc->lock); } @@ -488,6 +512,12 @@ handle_edge_irq(unsigned int irq, struct /* Mark the IRQ currently in progress.*/ desc->status |= IRQ_INPROGRESS; + /* + * hardirq redirection to the irqd process context: + */ + if (redirect_hardirq(desc)) + goto out_unlock; + do { struct irqaction *action = desc->action; irqreturn_t action_ret; Index: linux-2.6-tip/kernel/irq/handle.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/handle.c +++ linux-2.6-tip/kernel/irq/handle.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -354,28 +355,61 @@ irqreturn_t handle_IRQ_event(unsigned in irqreturn_t ret, retval = IRQ_NONE; unsigned int status = 0; - WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!"); - - if (!(action->flags & IRQF_DISABLED)) - local_irq_enable_in_hardirq(); + /* + * Unconditionally enable interrupts for threaded + * IRQ handlers: + */ + if (!hardirq_count() || !(action->flags & IRQF_DISABLED)) + local_irq_enable(); do { + unsigned int preempt_count = preempt_count(); + trace_irq_handler_entry(irq, action); ret = action->handler(irq, action->dev_id); trace_irq_handler_exit(irq, action, ret); + + if (preempt_count() != preempt_count) { + print_symbol("BUG: unbalanced irq-handler preempt count" + " in %s!\n", + (unsigned long) action->handler); + printk("entered with %08x, exited with %08x.\n", + preempt_count, preempt_count()); + dump_stack(); + preempt_count() = preempt_count; + } + if (ret == IRQ_HANDLED) status |= action->flags; retval |= ret; action = action->next; } while (action); - if (status & IRQF_SAMPLE_RANDOM) + if (status & IRQF_SAMPLE_RANDOM) { + local_irq_enable(); add_interrupt_randomness(irq); + } local_irq_disable(); return retval; } +int redirect_hardirq(struct irq_desc *desc) +{ + /* + * Direct execution: + */ + if (!hardirq_preemption || (desc->status & IRQ_NODELAY) || + !desc->thread) + return 0; + + BUG_ON(!irqs_disabled()); + if (desc->thread && desc->thread->state != TASK_RUNNING) + wake_up_process(desc->thread); + + return 1; +} + #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ #ifdef CONFIG_ENABLE_WARN_DEPRECATED Index: linux-2.6-tip/kernel/irq/internals.h =================================================================== --- linux-2.6-tip.orig/kernel/irq/internals.h +++ linux-2.6-tip/kernel/irq/internals.h @@ -26,6 +26,10 @@ extern struct irq_desc **irq_desc_ptrs; extern struct irq_desc *irq_desc_ptrs[NR_IRQS]; #endif +extern int redirect_hardirq(struct irq_desc *desc); + +void recalculate_desc_flags(struct irq_desc *desc); + #ifdef CONFIG_PROC_FS extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); extern void register_handler_proc(unsigned int irq, struct irqaction *action); Index: linux-2.6-tip/kernel/irq/manage.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/manage.c +++ linux-2.6-tip/kernel/irq/manage.c @@ -8,8 +8,10 @@ */ #include -#include #include +#include +#include +#include #include #include @@ -43,8 +45,12 @@ void synchronize_irq(unsigned int irq) * Wait until we're out of the critical section. This might * give the wrong answer due to the lack of memory barriers. 
*/ - while (desc->status & IRQ_INPROGRESS) - cpu_relax(); + if (hardirq_preemption && !(desc->status & IRQ_NODELAY)) + wait_event(desc->wait_for_handler, + !(desc->status & IRQ_INPROGRESS)); + else + while (desc->status & IRQ_INPROGRESS) + cpu_relax(); /* Ok, that indicated we're done: double-check carefully. */ spin_lock_irqsave(&desc->lock, flags); @@ -317,6 +323,21 @@ int set_irq_wake(unsigned int irq, unsig EXPORT_SYMBOL(set_irq_wake); /* + * If any action has IRQF_NODELAY then turn IRQ_NODELAY on: + */ +void recalculate_desc_flags(struct irq_desc *desc) +{ + struct irqaction *action; + + desc->status &= ~IRQ_NODELAY; + for (action = desc->action ; action; action = action->next) + if (action->flags & IRQF_NODELAY) + desc->status |= IRQ_NODELAY; +} + +static int start_irq_thread(int irq, struct irq_desc *desc); + +/* * Internal function that tells the architecture code whether a * particular irq has been exclusively allocated or is available * for driver use. @@ -419,6 +440,9 @@ __setup_irq(unsigned int irq, struct irq rand_initialize_irq(irq); } + if (!(new->flags & IRQF_NODELAY)) + if (start_irq_thread(irq, desc)) + return -ENOMEM; /* * The following block of code has to be executed atomically */ @@ -501,6 +525,11 @@ __setup_irq(unsigned int irq, struct irq *old_ptr = new; + /* + * Propagate any possible IRQF_NODELAY flag into IRQ_NODELAY: + */ + recalculate_desc_flags(desc); + /* Reset broken irq detection when installing new handler */ desc->irq_count = 0; desc->irqs_unhandled = 0; @@ -518,7 +547,7 @@ __setup_irq(unsigned int irq, struct irq new->irq = irq; register_irq_proc(irq, desc); - new->dir = NULL; + new->dir = new->threaded = NULL; register_handler_proc(irq, new); return 0; @@ -605,6 +634,7 @@ static struct irqaction *__free_irq(unsi else desc->chip->disable(irq); } + recalculate_desc_flags(desc); spin_unlock_irqrestore(&desc->lock, flags); unregister_handler_proc(irq, action); @@ -772,3 +802,258 @@ int request_irq(unsigned int irq, irq_ha return retval; } EXPORT_SYMBOL(request_irq); + +#ifdef CONFIG_PREEMPT_HARDIRQS + +int hardirq_preemption = 1; + +EXPORT_SYMBOL(hardirq_preemption); + +static int __init hardirq_preempt_setup (char *str) +{ + if (!strncmp(str, "off", 3)) + hardirq_preemption = 0; + else + get_option(&str, &hardirq_preemption); + if (!hardirq_preemption) + printk("turning off hardirq preemption!\n"); + + return 1; +} + +__setup("hardirq-preempt=", hardirq_preempt_setup); + + +/* + * threaded simple handler + */ +static void thread_simple_irq(irq_desc_t *desc) +{ + struct irqaction *action = desc->action; + unsigned int irq = desc - irq_desc; + irqreturn_t action_ret; + + if (action && !desc->depth) { + spin_unlock(&desc->lock); + action_ret = handle_IRQ_event(irq, action); + cond_resched_hardirq_context(); + spin_lock_irq(&desc->lock); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + } + desc->status &= ~IRQ_INPROGRESS; +} + +/* + * threaded level type irq handler + */ +static void thread_level_irq(irq_desc_t *desc) +{ + unsigned int irq = desc - irq_desc; + + thread_simple_irq(desc); + if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) + desc->chip->unmask(irq); +} + +/* + * threaded fasteoi type irq handler + */ +static void thread_fasteoi_irq(irq_desc_t *desc) +{ + unsigned int irq = desc - irq_desc; + + thread_simple_irq(desc); + if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) + desc->chip->unmask(irq); +} + +/* + * threaded edge type IRQ handler + */ +static void thread_edge_irq(irq_desc_t *desc) +{ + unsigned int irq = 
desc - irq_desc; + + do { + struct irqaction *action = desc->action; + irqreturn_t action_ret; + + if (unlikely(!action)) { + desc->status &= ~IRQ_INPROGRESS; + desc->chip->mask(irq); + return; + } + + /* + * When another irq arrived while we were handling + * one, we could have masked the irq. + * Renable it, if it was not disabled in meantime. + */ + if (unlikely(((desc->status & (IRQ_PENDING | IRQ_MASKED)) == + (IRQ_PENDING | IRQ_MASKED)) && !desc->depth)) + desc->chip->unmask(irq); + + desc->status &= ~IRQ_PENDING; + spin_unlock(&desc->lock); + action_ret = handle_IRQ_event(irq, action); + cond_resched_hardirq_context(); + spin_lock_irq(&desc->lock); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + } while ((desc->status & IRQ_PENDING) && !desc->depth); + + desc->status &= ~IRQ_INPROGRESS; +} + +/* + * threaded edge type IRQ handler + */ +static void thread_do_irq(irq_desc_t *desc) +{ + unsigned int irq = desc - irq_desc; + + do { + struct irqaction *action = desc->action; + irqreturn_t action_ret; + + if (unlikely(!action)) { + desc->status &= ~IRQ_INPROGRESS; + desc->chip->disable(irq); + return; + } + + desc->status &= ~IRQ_PENDING; + spin_unlock(&desc->lock); + action_ret = handle_IRQ_event(irq, action); + cond_resched_hardirq_context(); + spin_lock_irq(&desc->lock); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + } while ((desc->status & IRQ_PENDING) && !desc->depth); + + desc->status &= ~IRQ_INPROGRESS; + desc->chip->end(irq); +} + +static void do_hardirq(struct irq_desc *desc) +{ + unsigned long flags; + + spin_lock_irqsave(&desc->lock, flags); + + if (!(desc->status & IRQ_INPROGRESS)) + goto out; + + if (desc->handle_irq == handle_simple_irq) + thread_simple_irq(desc); + else if (desc->handle_irq == handle_level_irq) + thread_level_irq(desc); + else if (desc->handle_irq == handle_fasteoi_irq) + thread_fasteoi_irq(desc); + else if (desc->handle_irq == handle_edge_irq) + thread_edge_irq(desc); + else + thread_do_irq(desc); + out: + spin_unlock_irqrestore(&desc->lock, flags); + + if (waitqueue_active(&desc->wait_for_handler)) + wake_up(&desc->wait_for_handler); +} + +extern asmlinkage void __do_softirq(void); + +static int do_irqd(void * __desc) +{ + struct sched_param param = { 0, }; + struct irq_desc *desc = __desc; + +#ifdef CONFIG_SMP + set_cpus_allowed(current, desc->affinity); +#endif + current->flags |= PF_NOFREEZE | PF_HARDIRQ; + + /* + * Set irq thread priority to SCHED_FIFO/50: + */ + param.sched_priority = MAX_USER_RT_PRIO/2; + + sys_sched_setscheduler(current->pid, SCHED_FIFO, ¶m); + + while (!kthread_should_stop()) { + local_irq_disable(); + set_current_state(TASK_INTERRUPTIBLE); + irq_enter(); + do_hardirq(desc); + irq_exit(); + local_irq_enable(); + cond_resched(); +#ifdef CONFIG_SMP + /* + * Did IRQ affinities change? 
+ */ + if (!cpus_equal(current->cpus_allowed, desc->affinity)) + set_cpus_allowed(current, desc->affinity); +#endif + schedule(); + } + __set_current_state(TASK_RUNNING); + + return 0; +} + +static int ok_to_create_irq_threads; + +static int start_irq_thread(int irq, struct irq_desc *desc) +{ + if (desc->thread || !ok_to_create_irq_threads) + return 0; + + desc->thread = kthread_create(do_irqd, desc, "IRQ-%d", irq); + if (!desc->thread) { + printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq); + return -ENOMEM; + } + + /* + * An interrupt may have come in before the thread pointer was + * stored in desc->thread; make sure the thread gets woken up in + * such a case: + */ + smp_mb(); + wake_up_process(desc->thread); + + return 0; +} + +void __init init_hardirqs(void) +{ + int i; + ok_to_create_irq_threads = 1; + + for (i = 0; i < NR_IRQS; i++) { + irq_desc_t *desc = irq_desc + i; + + if (desc->action && !(desc->status & IRQ_NODELAY)) + start_irq_thread(i, desc); + } +} + +#else + +static int start_irq_thread(int irq, struct irq_desc *desc) +{ + return 0; +} + +#endif + +void __init early_init_hardirqs(void) +{ + struct irq_desc *desc; + int i; + + for_each_irq_desc(i, desc) + init_waitqueue_head(&desc->wait_for_handler); +} Index: linux-2.6-tip/kernel/irq/proc.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/proc.c +++ linux-2.6-tip/kernel/irq/proc.c @@ -7,6 +7,8 @@ */ #include +#include +#include #include #include #include @@ -116,6 +118,9 @@ static ssize_t default_affinity_write(st goto out; } + /* create /proc/irq/prof_cpu_mask */ + create_prof_cpu_mask(root_irq_dir); + /* * Do not allow disabling IRQs completely - it's a too easy * way to make the system unusable accidentally :-) At least @@ -160,45 +165,6 @@ static int irq_spurious_read(char *page, jiffies_to_msecs(desc->last_unhandled)); } -#define MAX_NAMELEN 128 - -static int name_unique(unsigned int irq, struct irqaction *new_action) -{ - struct irq_desc *desc = irq_to_desc(irq); - struct irqaction *action; - unsigned long flags; - int ret = 1; - - spin_lock_irqsave(&desc->lock, flags); - for (action = desc->action ; action; action = action->next) { - if ((action != new_action) && action->name && - !strcmp(new_action->name, action->name)) { - ret = 0; - break; - } - } - spin_unlock_irqrestore(&desc->lock, flags); - return ret; -} - -void register_handler_proc(unsigned int irq, struct irqaction *action) -{ - char name [MAX_NAMELEN]; - struct irq_desc *desc = irq_to_desc(irq); - - if (!desc->dir || action->dir || !action->name || - !name_unique(irq, action)) - return; - - memset(name, 0, MAX_NAMELEN); - snprintf(name, MAX_NAMELEN, "%s", action->name); - - /* create /proc/irq/1234/handler/ */ - action->dir = proc_mkdir(name, desc->dir); -} - -#undef MAX_NAMELEN - #define MAX_NAMELEN 10 void register_irq_proc(unsigned int irq, struct irq_desc *desc) @@ -232,6 +198,8 @@ void register_irq_proc(unsigned int irq, void unregister_handler_proc(unsigned int irq, struct irqaction *action) { + if (action->threaded) + remove_proc_entry(action->threaded->name, action->dir); if (action->dir) { struct irq_desc *desc = irq_to_desc(irq); @@ -247,6 +215,91 @@ static void register_default_affinity_pr #endif } +#ifndef CONFIG_PREEMPT_RT + +static int threaded_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + return sprintf(page, "%c\n", + ((struct irqaction *)data)->flags & IRQF_NODELAY ? 
'0' : '1'); +} + +static int threaded_write_proc(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + int c; + struct irqaction *action = data; + irq_desc_t *desc = irq_to_desc(action->irq); + + if (get_user(c, buffer)) + return -EFAULT; + if (c != '0' && c != '1') + return -EINVAL; + + spin_lock_irq(&desc->lock); + + if (c == '0') + action->flags |= IRQF_NODELAY; + if (c == '1') + action->flags &= ~IRQF_NODELAY; + recalculate_desc_flags(desc); + + spin_unlock_irq(&desc->lock); + + return 1; +} + +#endif + +#define MAX_NAMELEN 128 + +static int name_unique(unsigned int irq, struct irqaction *new_action) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *action; + + for (action = desc->action ; action; action = action->next) + if ((action != new_action) && action->name && + !strcmp(new_action->name, action->name)) + return 0; + return 1; +} + +void register_handler_proc(unsigned int irq, struct irqaction *action) +{ + char name [MAX_NAMELEN]; + struct irq_desc *desc = irq_to_desc(irq); + + if (!desc->dir || action->dir || !action->name || + !name_unique(irq, action)) + return; + + memset(name, 0, MAX_NAMELEN); + snprintf(name, MAX_NAMELEN, "%s", action->name); + + /* create /proc/irq/1234/handler/ */ + action->dir = proc_mkdir(name, desc->dir); + + if (!action->dir) + return; +#ifndef CONFIG_PREEMPT_RT + { + struct proc_dir_entry *entry; + /* create /proc/irq/1234/handler/threaded */ + entry = create_proc_entry("threaded", 0600, action->dir); + if (!entry) + return; + entry->nlink = 1; + entry->data = (void *)action; + entry->read_proc = threaded_read_proc; + entry->write_proc = threaded_write_proc; + action->threaded = entry; + } +#endif +} + +#undef MAX_NAMELEN + void init_irq_proc(void) { unsigned int irq; Index: linux-2.6-tip/kernel/irq/spurious.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/spurious.c +++ linux-2.6-tip/kernel/irq/spurious.c @@ -14,6 +14,11 @@ #include #include +#ifdef CONFIG_X86_IO_APIC +# include +# include +#endif + static int irqfixup __read_mostly; #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) @@ -258,6 +263,12 @@ void note_interrupt(unsigned int irq, st * The interrupt is stuck */ __report_bad_irq(irq, desc, action_ret); +#ifdef CONFIG_X86_IO_APIC + if (!sis_apic_bug) { + sis_apic_bug = 1; + printk(KERN_ERR "turning off IO-APIC fast mode.\n"); + } +#else /* * Now kill the IRQ */ @@ -268,6 +279,7 @@ void note_interrupt(unsigned int irq, st mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); +#endif } desc->irqs_unhandled = 0; } Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -4443,7 +4443,7 @@ void account_system_time(struct task_str /* Add system time to cpustat. 
*/ tmp = cputime_to_cputime64(cputime); - if (hardirq_count() - hardirq_offset) + if (hardirq_count() - hardirq_offset || (p->flags & PF_HARDIRQ)) cpustat->irq = cputime64_add(cpustat->irq, tmp); else if (softirq_count() || (p->flags & PF_SOFTIRQ)) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); @@ -6110,6 +6110,27 @@ int __sched cond_resched_softirq_context } EXPORT_SYMBOL(cond_resched_softirq_context); +/* + * Preempt a hardirq context if necessary (possible with hardirq threading): + */ +int cond_resched_hardirq_context(void) +{ + WARN_ON_ONCE(!in_irq()); + WARN_ON_ONCE(!irqs_disabled()); + + if (hardirq_need_resched()) { + irq_exit(); + local_irq_enable(); + __cond_resched(); + local_irq_disable(); + __irq_enter(); + + return 1; + } + return 0; +} +EXPORT_SYMBOL(cond_resched_hardirq_context); + /** * yield - yield the current processor to other threads. * patches/preempt-irqs-core-fix.patch0000664000076400007640000000141711160370372016423 0ustar tglxtglxSubject: remove one extra (incorrect) test in mask_ack_irq() Date: Wed, 18 Mar 2009 19:16:46 -0700 From: Frank Rowand preempt-irqs-core.patch added an incorrect check for the existence of desc->chip->ack to mask_ack_irq() Signed-off-by: Frank Rowand --- kernel/irq/chip.c | 1 - 1 file changed, 1 deletion(-) Index: linux-2.6-tip/kernel/irq/chip.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/chip.c +++ linux-2.6-tip/kernel/irq/chip.c @@ -293,7 +293,6 @@ static inline void mask_ack_irq(struct i if (desc->chip->mask_ack) desc->chip->mask_ack(irq); else { - if (desc->chip->ack) if (desc->chip->mask) desc->chip->mask(irq); if (desc->chip->ack) patches/preempt-irqs-direct-debug-keyboard.patch0000664000076400007640000000512511155177561021054 0ustar tglxtglxSubject: preempt: irqs direct debug keyboard From: Ingo Molnar Date: Wed Feb 04 00:03:09 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/sched.h | 6 ++++++ init/main.c | 2 ++ kernel/irq/handle.c | 31 +++++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+) Index: linux-2.6-tip/include/linux/sched.h =================================================================== --- linux-2.6-tip.orig/include/linux/sched.h +++ linux-2.6-tip/include/linux/sched.h @@ -306,6 +306,12 @@ extern void scheduler_tick(void); extern void sched_show_task(struct task_struct *p); +#ifdef CONFIG_GENERIC_HARDIRQS +extern int debug_direct_keyboard; +#else +# define debug_direct_keyboard 0 +#endif + #ifdef CONFIG_DETECT_SOFTLOCKUP extern void softlockup_tick(void); extern void touch_softlockup_watchdog(void); Index: linux-2.6-tip/init/main.c =================================================================== --- linux-2.6-tip.orig/init/main.c +++ linux-2.6-tip/init/main.c @@ -898,5 +898,7 @@ static int __init kernel_init(void * unu */ init_post(); + WARN_ON(debug_direct_keyboard); + return 0; } Index: linux-2.6-tip/kernel/irq/handle.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/handle.c +++ linux-2.6-tip/kernel/irq/handle.c @@ -355,6 +355,11 @@ irqreturn_t handle_IRQ_event(unsigned in irqreturn_t ret, retval = IRQ_NONE; unsigned int status = 0; +#ifdef __i386__ + if (debug_direct_keyboard && irq == 1) + lockdep_off(); +#endif + /* * Unconditionally enable interrupts for threaded * IRQ handlers: @@ -391,9 +396,30 @@ irqreturn_t handle_IRQ_event(unsigned in } local_irq_disable(); +#ifdef __i386__ + if (debug_direct_keyboard && irq == 1) + lockdep_on(); +#endif return retval; } +/* + * Hack 
- used for development only. + */ +int __read_mostly debug_direct_keyboard = 0; + +int __init debug_direct_keyboard_setup(char *str) +{ + debug_direct_keyboard = 1; + printk(KERN_INFO "Switching IRQ 1 (keyboard) to to direct!\n"); +#ifdef CONFIG_PREEMPT_RT + printk(KERN_INFO "WARNING: kernel may easily crash this way!\n"); +#endif + return 1; +} + +__setup("debug_direct_keyboard", debug_direct_keyboard_setup); + int redirect_hardirq(struct irq_desc *desc) { /* @@ -403,6 +429,11 @@ int redirect_hardirq(struct irq_desc *de !desc->thread) return 0; +#ifdef __i386__ + if (debug_direct_keyboard && (desc - irq_desc == 1)) + return 0; +#endif + BUG_ON(!irqs_disabled()); if (desc->thread && desc->thread->state != TASK_RUNNING) wake_up_process(desc->thread); patches/preempt-realtime-direct-keyboard-sparseirq-fix.patch0000664000076400007640000000125411156214147023377 0ustar tglxtglxSubject: rt: irq handle.c fix2 From: Ingo Molnar Date: Sun Feb 08 18:06:00 CET 2009 => fold back to mingo-rt-irq-handle.c-fix.patch Signed-off-by: Ingo Molnar --- kernel/irq/handle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/kernel/irq/handle.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/handle.c +++ linux-2.6-tip/kernel/irq/handle.c @@ -430,7 +430,7 @@ int redirect_hardirq(struct irq_desc *de return 0; #ifdef __i386__ - if (debug_direct_keyboard && (desc - irq_desc == 1)) + if (debug_direct_keyboard && desc->irq == 1) return 0; #endif patches/preempt-irqs-timer.patch0000664000076400007640000001655611150327144016037 0ustar tglxtglxSubject: preempt: irqs timer From: Ingo Molnar Date: Wed Feb 04 00:03:09 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/timer.h | 4 + kernel/timer.c | 131 ++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 99 insertions(+), 36 deletions(-) Index: linux-2.6-tip/include/linux/timer.h =================================================================== --- linux-2.6-tip.orig/include/linux/timer.h +++ linux-2.6-tip/include/linux/timer.h @@ -223,10 +223,12 @@ static inline void timer_stats_timer_cle extern void add_timer(struct timer_list *timer); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_SOFTIRQS) + extern int timer_pending_sync(struct timer_list *timer); extern int try_to_del_timer_sync(struct timer_list *timer); extern int del_timer_sync(struct timer_list *timer); #else +# define timer_pending_sync(t) timer_pending(t) # define try_to_del_timer_sync(t) del_timer(t) # define del_timer_sync(t) del_timer(t) #endif Index: linux-2.6-tip/kernel/timer.c =================================================================== --- linux-2.6-tip.orig/kernel/timer.c +++ linux-2.6-tip/kernel/timer.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -69,6 +70,7 @@ struct tvec_root { struct tvec_base { spinlock_t lock; struct timer_list *running_timer; + wait_queue_head_t wait_for_running_timer; unsigned long timer_jiffies; struct tvec_root tv1; struct tvec tv2; @@ -316,9 +318,7 @@ EXPORT_SYMBOL_GPL(round_jiffies_up_relat static inline void set_running_timer(struct tvec_base *base, struct timer_list *timer) { -#ifdef CONFIG_SMP base->running_timer = timer; -#endif } static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) @@ -605,9 +605,7 @@ __mod_timer(struct timer_list *timer, un { struct tvec_base *base, *new_base; unsigned long flags; - int ret; - - ret = 0; + int cpu, ret = 0; 
timer_stats_timer_set_start_info(timer); BUG_ON(!timer->function); @@ -624,7 +622,8 @@ __mod_timer(struct timer_list *timer, un debug_timer_activate(timer); - new_base = __get_cpu_var(tvec_bases); + cpu = raw_smp_processor_id(); + new_base = per_cpu(tvec_bases, cpu); if (base != new_base) { /* @@ -754,6 +753,18 @@ void add_timer_on(struct timer_list *tim spin_unlock_irqrestore(&base->lock, flags); } +/* + * Wait for a running timer + */ +void wait_for_running_timer(struct timer_list *timer) +{ + struct tvec_base *base = timer->base; + + if (base->running_timer == timer) + wait_event(base->wait_for_running_timer, + base->running_timer != timer); +} + /** * del_timer - deactive a timer. * @timer: the timer to be deactivated @@ -785,7 +796,34 @@ int del_timer(struct timer_list *timer) } EXPORT_SYMBOL(del_timer); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_SOFTIRQS) +/* + * This function checks whether a timer is active and not running on any + * CPU. Upon successful (ret >= 0) exit the timer is not queued and the + * handler is not running on any CPU. + * + * It must not be called from interrupt contexts. + */ +int timer_pending_sync(struct timer_list *timer) +{ + struct tvec_base *base; + unsigned long flags; + int ret = -1; + + base = lock_timer_base(timer, &flags); + + if (base->running_timer == timer) + goto out; + + ret = 0; + if (timer_pending(timer)) + ret = 1; +out: + spin_unlock_irqrestore(&base->lock, flags); + + return ret; +} + /** * try_to_del_timer_sync - Try to deactivate a timer * @timer: timer do del @@ -850,7 +888,7 @@ int del_timer_sync(struct timer_list *ti int ret = try_to_del_timer_sync(timer); if (ret >= 0) return ret; - cpu_relax(); + wait_for_running_timer(timer); } } EXPORT_SYMBOL(del_timer_sync); @@ -895,6 +933,20 @@ static inline void __run_timers(struct t struct list_head *head = &work_list; int index = base->timer_jiffies & TVR_MASK; + if (softirq_need_resched()) { + spin_unlock_irq(&base->lock); + wake_up(&base->wait_for_running_timer); + cond_resched_softirq_context(); + cpu_relax(); + spin_lock_irq(&base->lock); + /* + * We can simply continue after preemption, nobody + * else can touch timer_jiffies so 'index' is still + * valid. Any new jiffy will be taken care of in + * subsequent loops: + */ + } + /* * Cascade timers: */ @@ -948,18 +1000,17 @@ static inline void __run_timers(struct t lock_map_release(&lockdep_map); if (preempt_count != preempt_count()) { - printk(KERN_ERR "huh, entered %p " - "with preempt_count %08x, exited" - " with %08x?\n", - fn, preempt_count, - preempt_count()); - BUG(); + print_symbol("BUG: unbalanced timer-handler preempt count in %s!\n", (unsigned long) fn); + printk("entered with %08x, exited with %08x.\n", preempt_count, preempt_count()); + preempt_count() = preempt_count; } } + set_running_timer(base, NULL); + cond_resched_softirq_context(); spin_lock_irq(&base->lock); } } - set_running_timer(base, NULL); + wake_up(&base->wait_for_running_timer); spin_unlock_irq(&base->lock); } @@ -1111,11 +1162,11 @@ void update_process_times(int user_tick) /* Note: this timer irq context must be accounted for as well. */ account_process_tick(p, user_tick); + scheduler_tick(); run_local_timers(); if (rcu_pending(cpu)) rcu_check_callbacks(cpu, user_tick); printk_tick(); - scheduler_tick(); run_posix_cpu_timers(p); } @@ -1161,19 +1212,6 @@ static inline void calc_load(unsigned lo } /* - * This function runs timers and the timer-tq in bottom half context. 
- */ -static void run_timer_softirq(struct softirq_action *h) -{ - struct tvec_base *base = __get_cpu_var(tvec_bases); - - hrtimer_run_pending(); - - if (time_after_eq(jiffies, base->timer_jiffies)) - __run_timers(base); -} - -/* * Called by the local, per-CPU timer interrupt on SMP. */ void run_local_timers(void) @@ -1184,13 +1222,36 @@ void run_local_timers(void) } /* - * Called by the timer interrupt. xtime_lock must already be taken - * by the timer IRQ! + * Time of day handling: */ -static inline void update_times(unsigned long ticks) +static inline void update_times(void) { - update_wall_time(); - calc_load(ticks); + static unsigned long last_tick = INITIAL_JIFFIES; + unsigned long ticks, flags; + + write_seqlock_irqsave(&xtime_lock, flags); + ticks = jiffies - last_tick; + if (ticks) { + last_tick += ticks; + update_wall_time(); + calc_load(ticks); + } + write_sequnlock_irqrestore(&xtime_lock, flags); +} + + +/* + * This function runs timers and the timer-tq in bottom half context. + */ +static void run_timer_softirq(struct softirq_action *h) +{ + struct tvec_base *base = __get_cpu_var(tvec_bases); + + update_times(); + hrtimer_run_pending(); + + if (time_after_eq(jiffies, base->timer_jiffies)) + __run_timers(base); } /* @@ -1202,7 +1263,6 @@ static inline void update_times(unsigned void do_timer(unsigned long ticks) { jiffies_64 += ticks; - update_times(ticks); } #ifdef __ARCH_WANT_SYS_ALARM @@ -1536,6 +1596,7 @@ static int __cpuinit init_timers_cpu(int } spin_lock_init(&base->lock); + init_waitqueue_head(&base->wait_for_running_timer); for (j = 0; j < TVN_SIZE; j++) { INIT_LIST_HEAD(base->tv5.vec + j); patches/preempt-irqs-hrtimer.patch0000664000076400007640000001025211157735274016372 0ustar tglxtglxSubject: patches/preempt-irqs-hrtimer.patch Signed-off-by: Ingo Molnar --- include/linux/hrtimer.h | 10 ++++++++++ kernel/hrtimer.c | 33 ++++++++++++++++++++++++++++++++- kernel/itimer.c | 1 + kernel/posix-timers.c | 3 +++ 4 files changed, 46 insertions(+), 1 deletion(-) Index: linux-2.6-tip/include/linux/hrtimer.h =================================================================== --- linux-2.6-tip.orig/include/linux/hrtimer.h +++ linux-2.6-tip/include/linux/hrtimer.h @@ -173,6 +173,9 @@ struct hrtimer_cpu_base { int hres_active; unsigned long nr_events; #endif +#ifdef CONFIG_PREEMPT_SOFTIRQS + wait_queue_head_t wait; +#endif }; static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) @@ -360,6 +363,13 @@ static inline int hrtimer_restart(struct return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } +/* Softirq preemption could deadlock timer removal */ +#ifdef CONFIG_PREEMPT_SOFTIRQS + extern void hrtimer_wait_for_timer(const struct hrtimer *timer); +#else +# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) +#endif + /* Query timers: */ extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); Index: linux-2.6-tip/kernel/hrtimer.c =================================================================== --- linux-2.6-tip.orig/kernel/hrtimer.c +++ linux-2.6-tip/kernel/hrtimer.c @@ -836,6 +836,32 @@ static int enqueue_hrtimer(struct hrtime return leftmost; } +#ifdef CONFIG_PREEMPT_SOFTIRQS +# define wake_up_timer_waiters(b) wake_up(&(b)->wait) + +/** + * hrtimer_wait_for_timer - Wait for a running timer + * + * @timer: timer to wait for + * + * The function waits in case the timers callback function is + * currently executed on the waitqueue of the timer base. 
The + * waitqueue is woken up after the timer callback function has + * finished execution. + */ +void hrtimer_wait_for_timer(const struct hrtimer *timer) +{ + struct hrtimer_clock_base *base = timer->base; + + if (base && base->cpu_base) + wait_event(base->cpu_base->wait, + !(timer->state & HRTIMER_STATE_CALLBACK)); +} + +#else +# define wake_up_timer_waiters(b) do { } while (0) +#endif + /* * __remove_hrtimer - internal function to remove a timer * @@ -864,6 +890,8 @@ static void __remove_hrtimer(struct hrti rb_erase(&timer->node, &base->active); } timer->state = newstate; + + wake_up_timer_waiters(base->cpu_base); } /* @@ -1022,7 +1050,7 @@ int hrtimer_cancel(struct hrtimer *timer if (ret >= 0) return ret; - cpu_relax(); + hrtimer_wait_for_timer(timer); } } EXPORT_SYMBOL_GPL(hrtimer_cancel); @@ -1545,6 +1573,9 @@ static void __cpuinit init_hrtimers_cpu( cpu_base->clock_base[i].cpu_base = cpu_base; hrtimer_init_hres(cpu_base); +#ifdef CONFIG_PREEMPT_SOFTIRQS + init_waitqueue_head(&cpu_base->wait); +#endif } #ifdef CONFIG_HOTPLUG_CPU Index: linux-2.6-tip/kernel/itimer.c =================================================================== --- linux-2.6-tip.orig/kernel/itimer.c +++ linux-2.6-tip/kernel/itimer.c @@ -161,6 +161,7 @@ again: /* We are sharing ->siglock with it_real_fn() */ if (hrtimer_try_to_cancel(timer) < 0) { spin_unlock_irq(&tsk->sighand->siglock); + hrtimer_wait_for_timer(&tsk->signal->real_timer); goto again; } expires = timeval_to_ktime(value->it_value); Index: linux-2.6-tip/kernel/posix-timers.c =================================================================== --- linux-2.6-tip.orig/kernel/posix-timers.c +++ linux-2.6-tip/kernel/posix-timers.c @@ -789,6 +789,7 @@ retry: unlock_timer(timr, flag); if (error == TIMER_RETRY) { + hrtimer_wait_for_timer(&timr->it.real.timer); rtn = NULL; // We already got the old time... 
goto retry; } @@ -827,6 +828,7 @@ retry_delete: if (timer_delete_hook(timer) == TIMER_RETRY) { unlock_timer(timer, flags); + hrtimer_wait_for_timer(&timer->it.real.timer); goto retry_delete; } @@ -856,6 +858,7 @@ retry_delete: if (timer_delete_hook(timer) == TIMER_RETRY) { unlock_timer(timer, flags); + hrtimer_wait_for_timer(&timer->it.real.timer); goto retry_delete; } list_del(&timer->list); patches/preempt-irqs-i386.patch0000664000076400007640000000474011150464224015401 0ustar tglxtglxSubject: preempt: irqs i386 From: Ingo Molnar Date: Wed Feb 04 00:03:08 CET 2009 Signed-off-by: Ingo Molnar --- arch/x86/kernel/i8259.c | 10 ++++++---- arch/x86/kernel/irqinit_32.c | 2 ++ arch/x86/kernel/visws_quirks.c | 2 ++ 3 files changed, 10 insertions(+), 4 deletions(-) Index: linux-2.6-tip/arch/x86/kernel/i8259.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/i8259.c +++ linux-2.6-tip/arch/x86/kernel/i8259.c @@ -168,6 +168,8 @@ static void mask_and_ack_8259A(unsigned */ if (cached_irq_mask & irqmask) goto spurious_8259A_irq; + if (irq & 8) + outb(0x60+(irq&7), PIC_SLAVE_CMD); /* 'Specific EOI' to slave */ cached_irq_mask |= irqmask; handle_real_irq: @@ -328,10 +330,10 @@ void init_8259A(int auto_eoi) /* 8259A-1 (the master) has a slave on IR2 */ outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); - if (auto_eoi) /* master does Auto EOI */ - outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); - else /* master expects normal EOI */ - outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR); + if (!auto_eoi) /* master expects normal EOI */ + outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR); + else /* master does Auto EOI */ + outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); outb_pic(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */ Index: linux-2.6-tip/arch/x86/kernel/irqinit_32.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/irqinit_32.c +++ linux-2.6-tip/arch/x86/kernel/irqinit_32.c @@ -50,6 +50,7 @@ static irqreturn_t math_error_irq(int cp */ static struct irqaction fpu_irq = { .handler = math_error_irq, + .flags = IRQF_NODELAY, .mask = CPU_MASK_NONE, .name = "fpu", }; @@ -83,6 +84,7 @@ void __init init_ISA_irqs(void) */ static struct irqaction irq2 = { .handler = no_action, + .flags = IRQF_NODELAY, .mask = CPU_MASK_NONE, .name = "cascade", }; Index: linux-2.6-tip/arch/x86/kernel/visws_quirks.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/visws_quirks.c +++ linux-2.6-tip/arch/x86/kernel/visws_quirks.c @@ -643,11 +643,13 @@ out_unlock: static struct irqaction master_action = { .handler = piix4_master_intr, .name = "PIIX4-8259", + .flags = IRQF_NODELAY, }; static struct irqaction cascade_action = { .handler = no_action, .name = "cascade", + .flags = IRQF_NODELAY, }; patches/preempt-irqs-Kconfig.patch0000664000076400007640000000225211156731445016274 0ustar tglxtglxSubject: preempt: irqs Kconfig From: Ingo Molnar Date: Wed Feb 04 00:03:07 CET 2009 Signed-off-by: Ingo Molnar --- kernel/Kconfig.preempt | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) Index: linux-2.6-tip/kernel/Kconfig.preempt =================================================================== --- linux-2.6-tip.orig/kernel/Kconfig.preempt +++ linux-2.6-tip/kernel/Kconfig.preempt @@ -68,3 +68,22 @@ config PREEMPT_SOFTIRQS Say N if you are unsure. 
+config PREEMPT_HARDIRQS + bool "Thread Hardirqs" + default n + depends on GENERIC_HARDIRQS_NO__DO_IRQ + select PREEMPT_SOFTIRQS + help + This option reduces the latency of the kernel by 'threading' + hardirqs. This means that all (or selected) hardirqs will run + in their own kernel thread context. While this helps latency, + this feature can also reduce performance. + + The threading of hardirqs can also be controlled via the + /proc/sys/kernel/hardirq_preemption runtime flag and the + hardirq-preempt=0/1 boot-time option. Per-irq threading can + be enabled/disable via the /proc/irq///threaded + runtime flags. + + Say N if you are unsure. + patches/preempt-irqs-port-fixes.patch0000664000076400007640000000206111157735274017017 0ustar tglxtglxSubject: preempt: irqs port fixes From: Ingo Molnar Date: Wed Feb 04 01:41:19 CET 2009 cpumask infrastructure related changes. Signed-off-by: Ingo Molnar --- kernel/irq/manage.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) Index: linux-2.6-tip/kernel/irq/manage.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/manage.c +++ linux-2.6-tip/kernel/irq/manage.c @@ -970,7 +970,7 @@ static int do_irqd(void * __desc) struct irq_desc *desc = __desc; #ifdef CONFIG_SMP - set_cpus_allowed(current, desc->affinity); + set_cpus_allowed_ptr(current, desc->affinity); #endif current->flags |= PF_NOFREEZE | PF_HARDIRQ; @@ -993,8 +993,8 @@ static int do_irqd(void * __desc) /* * Did IRQ affinities change? */ - if (!cpus_equal(current->cpus_allowed, desc->affinity)) - set_cpus_allowed(current, desc->affinity); + if (!cpumask_equal(¤t->cpus_allowed, desc->affinity)) + set_cpus_allowed_ptr(current, desc->affinity); #endif schedule(); } patches/rt-apis.patch0000664000076400007640000000643211157735274013656 0ustar tglxtglxSubject: patches/rt-apis.patch add new, -rt specific IRQ API variants. Maps to the same as before on non-PREEMPT_RT. 
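As a rough illustration of the intended calling convention (an editorial sketch, not part of the patch; the function below is made up for the example), code that must keep hard interrupts off on a stock kernel but may stay preemptible on PREEMPT_RT would use the _nort variants added here:

	#include <linux/interrupt.h>

	static void example_touch_percpu_state(void)
	{
		unsigned long flags;

		/*
		 * !PREEMPT_RT: a real local_irq_save().
		 * PREEMPT_RT: only saves the flags word; interrupts stay
		 * enabled, since the handlers run in threads anyway.
		 */
		local_irq_save_nort(flags);

		/* ... touch state that only needs protection on !RT ... */

		local_irq_restore_nort(flags);
	}
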
include/linux/bottom_half.h | 8 ++++++++ include/linux/interrupt.h | 35 ++++++++++++++++++++++++++++++++++- 2 files changed, 42 insertions(+), 1 deletion(-) Signed-off-by: Ingo Molnar Index: linux-2.6-tip/include/linux/bottom_half.h =================================================================== --- linux-2.6-tip.orig/include/linux/bottom_half.h +++ linux-2.6-tip/include/linux/bottom_half.h @@ -1,9 +1,17 @@ #ifndef _LINUX_BH_H #define _LINUX_BH_H +#ifdef CONFIG_PREEMPT_RT +# define local_bh_disable() do { } while (0) +# define __local_bh_disable(ip) do { } while (0) +# define _local_bh_enable() do { } while (0) +# define local_bh_enable() do { } while (0) +# define local_bh_enable_ip(ip) do { } while (0) +#else extern void local_bh_disable(void); extern void _local_bh_enable(void); extern void local_bh_enable(void); extern void local_bh_enable_ip(unsigned long ip); +#endif #endif /* _LINUX_BH_H */ Index: linux-2.6-tip/include/linux/interrupt.h =================================================================== --- linux-2.6-tip.orig/include/linux/interrupt.h +++ linux-2.6-tip/include/linux/interrupt.h @@ -112,7 +112,7 @@ extern void devm_free_irq(struct device #ifdef CONFIG_LOCKDEP # define local_irq_enable_in_hardirq() do { } while (0) #else -# define local_irq_enable_in_hardirq() local_irq_enable() +# define local_irq_enable_in_hardirq() local_irq_enable_nort() #endif extern void disable_irq_nosync(unsigned int irq); @@ -526,4 +526,37 @@ extern int arch_probe_nr_irqs(void); extern int arch_early_irq_init(void); extern int arch_init_chip_data(struct irq_desc *desc, int cpu); +#ifdef CONFIG_PREEMPT_RT +# define local_irq_disable_nort() do { } while (0) +# define local_irq_enable_nort() do { } while (0) +# define local_irq_enable_rt() local_irq_enable() +# define local_irq_save_nort(flags) do { local_save_flags(flags); } while (0) +# define local_irq_restore_nort(flags) do { (void)(flags); } while (0) +# define spin_lock_nort(lock) do { } while (0) +# define spin_unlock_nort(lock) do { } while (0) +# define spin_lock_bh_nort(lock) do { } while (0) +# define spin_unlock_bh_nort(lock) do { } while (0) +# define spin_lock_rt(lock) spin_lock(lock) +# define spin_unlock_rt(lock) spin_unlock(lock) +# define smp_processor_id_rt(cpu) (cpu) +# define in_atomic_rt() (!oops_in_progress && \ + (in_atomic() || irqs_disabled())) +# define read_trylock_rt(lock) ({read_lock(lock); 1; }) +#else +# define local_irq_disable_nort() local_irq_disable() +# define local_irq_enable_nort() local_irq_enable() +# define local_irq_enable_rt() do { } while (0) +# define local_irq_save_nort(flags) local_irq_save(flags) +# define local_irq_restore_nort(flags) local_irq_restore(flags) +# define spin_lock_rt(lock) do { } while (0) +# define spin_unlock_rt(lock) do { } while (0) +# define spin_lock_nort(lock) spin_lock(lock) +# define spin_unlock_nort(lock) spin_unlock(lock) +# define spin_lock_bh_nort(lock) spin_lock_bh(lock) +# define spin_unlock_bh_nort(lock) spin_unlock_bh(lock) +# define smp_processor_id_rt(cpu) smp_processor_id() +# define in_atomic_rt() 0 +# define read_trylock_rt(lock) read_trylock(lock) +#endif + #endif patches/rt-slab-new.patch0000664000076400007640000011441211150327754014421 0ustar tglxtglxSubject: patches/rt-slab-new.patch new slab port. 
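The port replaces raw interrupt disabling in the slab fast paths with a per-CPU lock on PREEMPT_RT, so the helpers now pass around the CPU whose lists they locked. A minimal sketch of the changed convention (hypothetical caller, shown for illustration only, not taken from the diff):

	/*
	 * before:                            after:
	 *   local_irq_save(flags);             slab_irq_save(flags, this_cpu);
	 *   ac = cpu_cache_get(cachep);        ac = cpu_cache_get(cachep, this_cpu);
	 *   ...                                ...
	 *   local_irq_restore(flags);          slab_irq_restore(flags, this_cpu);
	 *
	 * On !PREEMPT_RT slab_irq_save() is local_irq_save() plus recording
	 * smp_processor_id(); on PREEMPT_RT it takes this CPU's slab_irq_locks
	 * entry instead and leaves hard interrupts enabled.
	 */
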
Signed-off-by: Ingo Molnar Folded in: From ak@suse.de Wed Sep 26 10:34:53 2007 Date: Mon, 17 Sep 2007 15:36:59 +0200 From: Andi Kleen To: mingo@elte.hu, Thomas Gleixner Cc: linux-rt-users@vger.kernel.org __do_cache_allow/alternate_node_alloc() need to pass the this_cpu variable from the caller to cache_grow(); otherwise the slab lock for the wrong CPU can be released when a task switches CPUs inside cache_grow(). Signed-off-by: Andi Kleen --- mm/slab.c | 493 +++++++++++++++++++++++++++++++++++++++----------------------- 1 file changed, 316 insertions(+), 177 deletions(-) Index: linux-2.6-tip/mm/slab.c =================================================================== --- linux-2.6-tip.orig/mm/slab.c +++ linux-2.6-tip/mm/slab.c @@ -120,6 +120,63 @@ #include /* + * On !PREEMPT_RT, raw irq flags are used as a per-CPU locking + * mechanism. + * + * On PREEMPT_RT, we use per-CPU locks for this. That's why the + * calling convention is changed slightly: a new 'flags' argument + * is passed to 'irq disable/enable' - the PREEMPT_RT code stores + * the CPU number of the lock there. + */ +#ifndef CONFIG_PREEMPT_RT +# define slab_irq_disable(cpu) \ + do { local_irq_disable(); (cpu) = smp_processor_id(); } while (0) +# define slab_irq_enable(cpu) local_irq_enable() +# define slab_irq_save(flags, cpu) \ + do { local_irq_save(flags); (cpu) = smp_processor_id(); } while (0) +# define slab_irq_restore(flags, cpu) local_irq_restore(flags) +/* + * In the __GFP_WAIT case we enable/disable interrupts on !PREEMPT_RT, + * which has no per-CPU locking effect since we are holding the cache + * lock in that case already. + * + * (On PREEMPT_RT, these are NOPs, but we have to drop/get the irq locks.) + */ +# define slab_irq_disable_nort() local_irq_disable() +# define slab_irq_enable_nort() local_irq_enable() +# define slab_irq_disable_rt(flags) do { (void)(flags); } while (0) +# define slab_irq_enable_rt(flags) do { (void)(flags); } while (0) +# define slab_spin_lock_irq(lock, cpu) \ + do { spin_lock_irq(lock); (cpu) = smp_processor_id(); } while (0) +# define slab_spin_unlock_irq(lock, cpu) \ + spin_unlock_irq(lock) +# define slab_spin_lock_irqsave(lock, flags, cpu) \ + do { spin_lock_irqsave(lock, flags); (cpu) = smp_processor_id(); } while (0) +# define slab_spin_unlock_irqrestore(lock, flags, cpu) \ + do { spin_unlock_irqrestore(lock, flags); } while (0) +#else +DEFINE_PER_CPU_LOCKED(int, slab_irq_locks) = { 0, }; +# define slab_irq_disable(cpu) (void)get_cpu_var_locked(slab_irq_locks, &(cpu)) +# define slab_irq_enable(cpu) put_cpu_var_locked(slab_irq_locks, cpu) +# define slab_irq_save(flags, cpu) \ + do { slab_irq_disable(cpu); (void) (flags); } while (0) +# define slab_irq_restore(flags, cpu) \ + do { slab_irq_enable(cpu); (void) (flags); } while (0) +# define slab_irq_disable_rt(cpu) slab_irq_disable(cpu) +# define slab_irq_enable_rt(cpu) slab_irq_enable(cpu) +# define slab_irq_disable_nort() do { } while (0) +# define slab_irq_enable_nort() do { } while (0) +# define slab_spin_lock_irq(lock, cpu) \ + do { slab_irq_disable(cpu); spin_lock(lock); } while (0) +# define slab_spin_unlock_irq(lock, cpu) \ + do { spin_unlock(lock); slab_irq_enable(cpu); } while (0) +# define slab_spin_lock_irqsave(lock, flags, cpu) \ + do { slab_irq_disable(cpu); spin_lock_irqsave(lock, flags); } while (0) +# define slab_spin_unlock_irqrestore(lock, flags, cpu) \ + do { spin_unlock_irqrestore(lock, flags); slab_irq_enable(cpu); } while (0) +#endif + +/* * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. 
* 0 for faster, smaller code (especially in the critical paths). * @@ -315,7 +372,7 @@ struct kmem_list3 __initdata initkmem_li static int drain_freelist(struct kmem_cache *cache, struct kmem_list3 *l3, int tofree); static void free_block(struct kmem_cache *cachep, void **objpp, int len, - int node); + int node, int *this_cpu); static int enable_cpucache(struct kmem_cache *cachep); static void cache_reap(struct work_struct *unused); @@ -685,9 +742,10 @@ int slab_is_available(void) static DEFINE_PER_CPU(struct delayed_work, reap_work); -static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) +static inline struct array_cache * +cpu_cache_get(struct kmem_cache *cachep, int this_cpu) { - return cachep->array[smp_processor_id()]; + return cachep->array[this_cpu]; } static inline struct kmem_cache *__find_general_cachep(size_t size, @@ -921,7 +979,7 @@ static int transfer_objects(struct array #ifndef CONFIG_NUMA #define drain_alien_cache(cachep, alien) do { } while (0) -#define reap_alien(cachep, l3) do { } while (0) +#define reap_alien(cachep, l3, this_cpu) do { } while (0) static inline struct array_cache **alloc_alien_cache(int node, int limit) { @@ -932,27 +990,29 @@ static inline void free_alien_cache(stru { } -static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) +static inline int +cache_free_alien(struct kmem_cache *cachep, void *objp, int *this_cpu) { return 0; } static inline void *alternate_node_alloc(struct kmem_cache *cachep, - gfp_t flags) + gfp_t flags, int *this_cpu) { return NULL; } static inline void *____cache_alloc_node(struct kmem_cache *cachep, - gfp_t flags, int nodeid) + gfp_t flags, int nodeid, int *this_cpu) { return NULL; } #else /* CONFIG_NUMA */ -static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); -static void *alternate_node_alloc(struct kmem_cache *, gfp_t); +static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, + int nodeid, int *this_cpu); +static void *alternate_node_alloc(struct kmem_cache *, gfp_t, int *); static struct array_cache **alloc_alien_cache(int node, int limit) { @@ -993,7 +1053,8 @@ static void free_alien_cache(struct arra } static void __drain_alien_cache(struct kmem_cache *cachep, - struct array_cache *ac, int node) + struct array_cache *ac, int node, + int *this_cpu) { struct kmem_list3 *rl3 = cachep->nodelists[node]; @@ -1007,7 +1068,7 @@ static void __drain_alien_cache(struct k if (rl3->shared) transfer_objects(rl3->shared, ac, ac->limit); - free_block(cachep, ac->entry, ac->avail, node); + free_block(cachep, ac->entry, ac->avail, node, this_cpu); ac->avail = 0; spin_unlock(&rl3->list_lock); } @@ -1016,15 +1077,16 @@ static void __drain_alien_cache(struct k /* * Called from cache_reap() to regularly drain alien caches round robin. 
*/ -static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) +static void +reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3, int *this_cpu) { - int node = __get_cpu_var(reap_node); + int node = per_cpu(reap_node, *this_cpu); if (l3->alien) { struct array_cache *ac = l3->alien[node]; if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { - __drain_alien_cache(cachep, ac, node); + __drain_alien_cache(cachep, ac, node, this_cpu); spin_unlock_irq(&ac->lock); } } @@ -1033,21 +1095,22 @@ static void reap_alien(struct kmem_cache static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien) { - int i = 0; + int i = 0, this_cpu; struct array_cache *ac; unsigned long flags; for_each_online_node(i) { ac = alien[i]; if (ac) { - spin_lock_irqsave(&ac->lock, flags); - __drain_alien_cache(cachep, ac, i); - spin_unlock_irqrestore(&ac->lock, flags); + slab_spin_lock_irqsave(&ac->lock, flags, this_cpu); + __drain_alien_cache(cachep, ac, i, &this_cpu); + slab_spin_unlock_irqrestore(&ac->lock, flags, this_cpu); } } } -static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) +static inline int +cache_free_alien(struct kmem_cache *cachep, void *objp, int *this_cpu) { struct slab *slabp = virt_to_slab(objp); int nodeid = slabp->nodeid; @@ -1071,13 +1134,13 @@ static inline int cache_free_alien(struc spin_lock(&alien->lock); if (unlikely(alien->avail == alien->limit)) { STATS_INC_ACOVERFLOW(cachep); - __drain_alien_cache(cachep, alien, nodeid); + __drain_alien_cache(cachep, alien, nodeid, this_cpu); } alien->entry[alien->avail++] = objp; spin_unlock(&alien->lock); } else { spin_lock(&(cachep->nodelists[nodeid])->list_lock); - free_block(cachep, &objp, 1, nodeid); + free_block(cachep, &objp, 1, nodeid, this_cpu); spin_unlock(&(cachep->nodelists[nodeid])->list_lock); } return 1; @@ -1095,6 +1158,7 @@ static void __cpuinit cpuup_canceled(lon struct array_cache *nc; struct array_cache *shared; struct array_cache **alien; + int this_cpu; /* cpu is dead; no one can alloc from it. 
*/ nc = cachep->array[cpu]; @@ -1104,29 +1168,31 @@ static void __cpuinit cpuup_canceled(lon if (!l3) goto free_array_cache; - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); /* Free limit for this kmem_list3 */ l3->free_limit -= cachep->batchcount; if (nc) - free_block(cachep, nc->entry, nc->avail, node); + free_block(cachep, nc->entry, nc->avail, node, + &this_cpu); if (!cpus_empty(*mask)) { - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, + this_cpu); goto free_array_cache; } shared = l3->shared; if (shared) { free_block(cachep, shared->entry, - shared->avail, node); + shared->avail, node, &this_cpu); l3->shared = NULL; } alien = l3->alien; l3->alien = NULL; - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); kfree(shared); if (alien) { @@ -1155,6 +1221,7 @@ static int __cpuinit cpuup_prepare(long struct kmem_list3 *l3 = NULL; int node = cpu_to_node(cpu); const int memsize = sizeof(struct kmem_list3); + int this_cpu; /* * We need to do this right in the beginning since @@ -1185,11 +1252,11 @@ static int __cpuinit cpuup_prepare(long cachep->nodelists[node] = l3; } - spin_lock_irq(&cachep->nodelists[node]->list_lock); + slab_spin_lock_irq(&cachep->nodelists[node]->list_lock, this_cpu); cachep->nodelists[node]->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; - spin_unlock_irq(&cachep->nodelists[node]->list_lock); + slab_spin_unlock_irq(&cachep->nodelists[node]->list_lock, this_cpu); } /* @@ -1226,7 +1293,7 @@ static int __cpuinit cpuup_prepare(long l3 = cachep->nodelists[node]; BUG_ON(!l3); - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); if (!l3->shared) { /* * We are serialised from CPU_DEAD or @@ -1241,7 +1308,7 @@ static int __cpuinit cpuup_prepare(long alien = NULL; } #endif - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); kfree(shared); free_alien_cache(alien); } @@ -1318,11 +1385,13 @@ static void init_list(struct kmem_cache int nodeid) { struct kmem_list3 *ptr; + int this_cpu; ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid); BUG_ON(!ptr); - local_irq_disable(); + WARN_ON(spin_is_locked(&list->list_lock)); + slab_irq_disable(this_cpu); memcpy(ptr, list, sizeof(struct kmem_list3)); /* * Do not assume that spinlocks can be initialized via memcpy: @@ -1331,7 +1400,7 @@ static void init_list(struct kmem_cache MAKE_ALL_LISTS(cachep, ptr, nodeid); cachep->nodelists[nodeid] = ptr; - local_irq_enable(); + slab_irq_enable(this_cpu); } /* @@ -1494,36 +1563,34 @@ void __init kmem_cache_init(void) /* 4) Replace the bootstrap head arrays */ { struct array_cache *ptr; + int this_cpu; ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); - local_irq_disable(); - BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); - memcpy(ptr, cpu_cache_get(&cache_cache), - sizeof(struct arraycache_init)); + slab_irq_disable(this_cpu); + BUG_ON(cpu_cache_get(&cache_cache, this_cpu) != &initarray_cache.cache); + memcpy(ptr, cpu_cache_get(&cache_cache, this_cpu), + sizeof(struct arraycache_init)); /* * Do not assume that spinlocks can be initialized via memcpy: */ spin_lock_init(&ptr->lock); - - cache_cache.array[smp_processor_id()] = ptr; - local_irq_enable(); + cache_cache.array[this_cpu] = ptr; + slab_irq_enable(this_cpu); ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); - local_irq_disable(); - BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) - != &initarray_generic.cache); - 
memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep), - sizeof(struct arraycache_init)); + slab_irq_disable(this_cpu); + BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep, this_cpu) + != &initarray_generic.cache); + memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep, this_cpu), + sizeof(struct arraycache_init)); /* * Do not assume that spinlocks can be initialized via memcpy: */ spin_lock_init(&ptr->lock); - - malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = - ptr; - local_irq_enable(); + malloc_sizes[INDEX_AC].cs_cachep->array[this_cpu] = ptr; + slab_irq_enable(this_cpu); } /* 5) Replace the bootstrap kmem_list3's */ { @@ -1687,7 +1754,7 @@ static void store_stackinfo(struct kmem_ *addr++ = 0x12345678; *addr++ = caller; - *addr++ = smp_processor_id(); + *addr++ = raw_smp_processor_id(); size -= 3 * sizeof(unsigned long); { unsigned long *sptr = &caller; @@ -1877,6 +1944,10 @@ static void slab_destroy_debugcheck(stru } #endif +static void +__cache_free(struct kmem_cache *cachep, void *objp, int *this_cpu); + + /** * slab_destroy - destroy and release all objects in a slab * @cachep: cache pointer being destroyed @@ -1886,7 +1957,8 @@ static void slab_destroy_debugcheck(stru * Before calling the slab must have been unlinked from the cache. The * cache-lock is not held/needed. */ -static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) +static void +slab_destroy(struct kmem_cache *cachep, struct slab *slabp, int *this_cpu) { void *addr = slabp->s_mem - slabp->colouroff; @@ -1900,8 +1972,12 @@ static void slab_destroy(struct kmem_cac call_rcu(&slab_rcu->head, kmem_rcu_free); } else { kmem_freepages(cachep, addr); - if (OFF_SLAB(cachep)) - kmem_cache_free(cachep->slabp_cache, slabp); + if (OFF_SLAB(cachep)) { + if (this_cpu) + __cache_free(cachep->slabp_cache, slabp, this_cpu); + else + kmem_cache_free(cachep->slabp_cache, slabp); + } } } @@ -1998,6 +2074,8 @@ static size_t calculate_slab_order(struc static int __init_refok setup_cpu_cache(struct kmem_cache *cachep) { + int this_cpu; + if (g_cpucache_up == FULL) return enable_cpucache(cachep); @@ -2041,10 +2119,12 @@ static int __init_refok setup_cpu_cache( jiffies + REAPTIMEOUT_LIST3 + ((unsigned long)cachep) % REAPTIMEOUT_LIST3; - cpu_cache_get(cachep)->avail = 0; - cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; - cpu_cache_get(cachep)->batchcount = 1; - cpu_cache_get(cachep)->touched = 0; + this_cpu = raw_smp_processor_id(); + + cpu_cache_get(cachep, this_cpu)->avail = 0; + cpu_cache_get(cachep, this_cpu)->limit = BOOT_CPUCACHE_ENTRIES; + cpu_cache_get(cachep, this_cpu)->batchcount = 1; + cpu_cache_get(cachep, this_cpu)->touched = 0; cachep->batchcount = 1; cachep->limit = BOOT_CPUCACHE_ENTRIES; return 0; @@ -2335,19 +2415,19 @@ EXPORT_SYMBOL(kmem_cache_create); #if DEBUG static void check_irq_off(void) { +/* + * On PREEMPT_RT we use locks to protect the per-CPU lists, + * and keep interrupts enabled. 
+ */ +#ifndef CONFIG_PREEMPT_RT BUG_ON(!irqs_disabled()); +#endif } static void check_irq_on(void) { +#ifndef CONFIG_PREEMPT_RT BUG_ON(irqs_disabled()); -} - -static void check_spinlock_acquired(struct kmem_cache *cachep) -{ -#ifdef CONFIG_SMP - check_irq_off(); - assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); #endif } @@ -2362,7 +2442,6 @@ static void check_spinlock_acquired_node #else #define check_irq_off() do { } while(0) #define check_irq_on() do { } while(0) -#define check_spinlock_acquired(x) do { } while(0) #define check_spinlock_acquired_node(x, y) do { } while(0) #endif @@ -2370,26 +2449,60 @@ static void drain_array(struct kmem_cach struct array_cache *ac, int force, int node); -static void do_drain(void *arg) +static void __do_drain(void *arg, int this_cpu) { struct kmem_cache *cachep = arg; + int node = cpu_to_node(this_cpu); struct array_cache *ac; - int node = numa_node_id(); check_irq_off(); - ac = cpu_cache_get(cachep); + ac = cpu_cache_get(cachep, this_cpu); spin_lock(&cachep->nodelists[node]->list_lock); - free_block(cachep, ac->entry, ac->avail, node); + free_block(cachep, ac->entry, ac->avail, node, &this_cpu); spin_unlock(&cachep->nodelists[node]->list_lock); ac->avail = 0; } +#ifdef CONFIG_PREEMPT_RT +static void do_drain(void *arg, int this_cpu) +{ + __do_drain(arg, this_cpu); +} +#else +static void do_drain(void *arg) +{ + __do_drain(arg, smp_processor_id()); +} +#endif + +#ifdef CONFIG_PREEMPT_RT +/* + * execute func() for all CPUs. On PREEMPT_RT we dont actually have + * to run on the remote CPUs - we only have to take their CPU-locks. + * (This is a rare operation, so cacheline bouncing is not an issue.) + */ +static void +slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg) +{ + unsigned int i; + + check_irq_on(); + for_each_online_cpu(i) { + spin_lock(&__get_cpu_lock(slab_irq_locks, i)); + func(arg, i); + spin_unlock(&__get_cpu_lock(slab_irq_locks, i)); + } +} +#else +# define slab_on_each_cpu(func, cachep) on_each_cpu(func, cachep, 1) +#endif + static void drain_cpu_caches(struct kmem_cache *cachep) { struct kmem_list3 *l3; int node; - on_each_cpu(do_drain, cachep, 1); + slab_on_each_cpu(do_drain, cachep); check_irq_on(); for_each_online_node(node) { l3 = cachep->nodelists[node]; @@ -2414,16 +2527,16 @@ static int drain_freelist(struct kmem_ca struct kmem_list3 *l3, int tofree) { struct list_head *p; - int nr_freed; + int nr_freed, this_cpu; struct slab *slabp; nr_freed = 0; while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); p = l3->slabs_free.prev; if (p == &l3->slabs_free) { - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); goto out; } @@ -2432,13 +2545,9 @@ static int drain_freelist(struct kmem_ca BUG_ON(slabp->inuse); #endif list_del(&slabp->list); - /* - * Safe to drop the lock. The slab is no longer linked - * to the cache. - */ l3->free_objects -= cache->num; - spin_unlock_irq(&l3->list_lock); - slab_destroy(cache, slabp); + slab_destroy(cache, slabp, &this_cpu); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); nr_freed++; } out: @@ -2694,8 +2803,8 @@ static void slab_map_pages(struct kmem_c * Grow (by 1) the number of slabs within a cache. This is called by * kmem_cache_alloc() when there are no active objs left in a cache. 
*/ -static int cache_grow(struct kmem_cache *cachep, - gfp_t flags, int nodeid, void *objp) +static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid, + void *objp, int *this_cpu) { struct slab *slabp; size_t offset; @@ -2724,7 +2833,8 @@ static int cache_grow(struct kmem_cache offset *= cachep->colour_off; if (local_flags & __GFP_WAIT) - local_irq_enable(); + slab_irq_enable_nort(); + slab_irq_enable_rt(*this_cpu); /* * The test for missing atomic flag is performed here, rather than @@ -2753,8 +2863,10 @@ static int cache_grow(struct kmem_cache cache_init_objs(cachep, slabp); + slab_irq_disable_rt(*this_cpu); if (local_flags & __GFP_WAIT) - local_irq_disable(); + slab_irq_disable_nort(); + check_irq_off(); spin_lock(&l3->list_lock); @@ -2767,8 +2879,9 @@ static int cache_grow(struct kmem_cache opps1: kmem_freepages(cachep, objp); failed: + slab_irq_disable_rt(*this_cpu); if (local_flags & __GFP_WAIT) - local_irq_disable(); + slab_irq_disable_nort(); return 0; } @@ -2890,7 +3003,8 @@ bad: #define check_slabp(x,y) do { } while(0) #endif -static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) +static void * +cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, int *this_cpu) { int batchcount; struct kmem_list3 *l3; @@ -2900,7 +3014,7 @@ static void *cache_alloc_refill(struct k retry: check_irq_off(); node = numa_node_id(); - ac = cpu_cache_get(cachep); + ac = cpu_cache_get(cachep, *this_cpu); batchcount = ac->batchcount; if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { /* @@ -2910,7 +3024,7 @@ retry: */ batchcount = BATCHREFILL_LIMIT; } - l3 = cachep->nodelists[node]; + l3 = cachep->nodelists[cpu_to_node(*this_cpu)]; BUG_ON(ac->avail > 0 || !l3); spin_lock(&l3->list_lock); @@ -2933,7 +3047,7 @@ retry: slabp = list_entry(entry, struct slab, list); check_slabp(cachep, slabp); - check_spinlock_acquired(cachep); + check_spinlock_acquired_node(cachep, cpu_to_node(*this_cpu)); /* * The slab was either on partial or free list so @@ -2947,8 +3061,9 @@ retry: STATS_INC_ACTIVE(cachep); STATS_SET_HIGH(cachep); - ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, - node); + ac->entry[ac->avail++] = + slab_get_obj(cachep, slabp, + cpu_to_node(*this_cpu)); } check_slabp(cachep, slabp); @@ -2967,10 +3082,10 @@ alloc_done: if (unlikely(!ac->avail)) { int x; - x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); + x = cache_grow(cachep, flags | GFP_THISNODE, cpu_to_node(*this_cpu), NULL, this_cpu); /* cache_grow can reenable interrupts, then ac could change. */ - ac = cpu_cache_get(cachep); + ac = cpu_cache_get(cachep, *this_cpu); if (!x && ac->avail == 0) /* no objects in sight? abort */ return NULL; @@ -3057,21 +3172,22 @@ static bool slab_should_failslab(struct return should_failslab(obj_size(cachep), flags); } -static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) +static inline void * +____cache_alloc(struct kmem_cache *cachep, gfp_t flags, int *this_cpu) { void *objp; struct array_cache *ac; check_irq_off(); - ac = cpu_cache_get(cachep); + ac = cpu_cache_get(cachep, *this_cpu); if (likely(ac->avail)) { STATS_INC_ALLOCHIT(cachep); ac->touched = 1; objp = ac->entry[--ac->avail]; } else { STATS_INC_ALLOCMISS(cachep); - objp = cache_alloc_refill(cachep, flags); + objp = cache_alloc_refill(cachep, flags, this_cpu); } return objp; } @@ -3083,7 +3199,8 @@ static inline void *____cache_alloc(stru * If we are in_interrupt, then process context, including cpusets and * mempolicy, may not apply and should not be used for allocation policy. 
*/ -static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) +static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags, + int *this_cpu) { int nid_alloc, nid_here; @@ -3095,7 +3212,7 @@ static void *alternate_node_alloc(struct else if (current->mempolicy) nid_alloc = slab_node(current->mempolicy); if (nid_alloc != nid_here) - return ____cache_alloc_node(cachep, flags, nid_alloc); + return ____cache_alloc_node(cachep, flags, nid_alloc, this_cpu); return NULL; } @@ -3107,7 +3224,7 @@ static void *alternate_node_alloc(struct * allocator to do its reclaim / fallback magic. We then insert the * slab into the proper nodelist and then allocate from it. */ -static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) +static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags, int *this_cpu) { struct zonelist *zonelist; gfp_t local_flags; @@ -3135,7 +3252,8 @@ retry: cache->nodelists[nid] && cache->nodelists[nid]->free_objects) { obj = ____cache_alloc_node(cache, - flags | GFP_THISNODE, nid); + flags | GFP_THISNODE, nid, + this_cpu); if (obj) break; } @@ -3149,19 +3267,24 @@ retry: * set and go into memory reserves if necessary. */ if (local_flags & __GFP_WAIT) - local_irq_enable(); + slab_irq_enable_nort(); + slab_irq_enable_rt(*this_cpu); + kmem_flagcheck(cache, flags); obj = kmem_getpages(cache, local_flags, -1); + + slab_irq_disable_rt(*this_cpu); if (local_flags & __GFP_WAIT) - local_irq_disable(); + slab_irq_disable_nort(); + if (obj) { /* * Insert into the appropriate per node queues */ nid = page_to_nid(virt_to_page(obj)); - if (cache_grow(cache, flags, nid, obj)) { + if (cache_grow(cache, flags, nid, obj, this_cpu)) { obj = ____cache_alloc_node(cache, - flags | GFP_THISNODE, nid); + flags | GFP_THISNODE, nid, this_cpu); if (!obj) /* * Another processor may allocate the @@ -3182,7 +3305,7 @@ retry: * A interface to enable slab creation on nodeid */ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, - int nodeid) + int nodeid, int *this_cpu) { struct list_head *entry; struct slab *slabp; @@ -3230,11 +3353,11 @@ retry: must_grow: spin_unlock(&l3->list_lock); - x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); + x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL, this_cpu); if (x) goto retry; - return fallback_alloc(cachep, flags); + return fallback_alloc(cachep, flags, this_cpu); done: return obj; @@ -3257,6 +3380,7 @@ __cache_alloc_node(struct kmem_cache *ca void *caller) { unsigned long save_flags; + int this_cpu; void *ptr; lockdep_trace_alloc(flags); @@ -3265,32 +3389,33 @@ __cache_alloc_node(struct kmem_cache *ca return NULL; cache_alloc_debugcheck_before(cachep, flags); - local_irq_save(save_flags); + + slab_irq_save(save_flags, this_cpu); if (unlikely(nodeid == -1)) - nodeid = numa_node_id(); + nodeid = cpu_to_node(this_cpu); if (unlikely(!cachep->nodelists[nodeid])) { /* Node not bootstrapped yet */ - ptr = fallback_alloc(cachep, flags); + ptr = fallback_alloc(cachep, flags, &this_cpu); goto out; } - if (nodeid == numa_node_id()) { + if (nodeid == cpu_to_node(this_cpu)) { /* * Use the locally cached objects if possible. * However ____cache_alloc does not allow fallback * to other nodes. It may fail while we still have * objects on other nodes available. 
*/ - ptr = ____cache_alloc(cachep, flags); + ptr = ____cache_alloc(cachep, flags, &this_cpu); if (ptr) goto out; } /* ___cache_alloc_node can fall back to other nodes */ - ptr = ____cache_alloc_node(cachep, flags, nodeid); + ptr = ____cache_alloc_node(cachep, flags, nodeid, &this_cpu); out: - local_irq_restore(save_flags); + slab_irq_restore(save_flags, this_cpu); ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); if (likely(ptr)) @@ -3303,33 +3428,33 @@ __cache_alloc_node(struct kmem_cache *ca } static __always_inline void * -__do_cache_alloc(struct kmem_cache *cache, gfp_t flags) +__do_cache_alloc(struct kmem_cache *cache, gfp_t flags, int *this_cpu) { void *objp; if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) { - objp = alternate_node_alloc(cache, flags); + objp = alternate_node_alloc(cache, flags, this_cpu); if (objp) goto out; } - objp = ____cache_alloc(cache, flags); + objp = ____cache_alloc(cache, flags, this_cpu); /* * We may just have run out of memory on the local node. * ____cache_alloc_node() knows how to locate memory on other nodes */ - if (!objp) - objp = ____cache_alloc_node(cache, flags, numa_node_id()); - + if (!objp) + objp = ____cache_alloc_node(cache, flags, + cpu_to_node(*this_cpu), this_cpu); out: return objp; } #else static __always_inline void * -__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) +__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int *this_cpu) { - return ____cache_alloc(cachep, flags); + return ____cache_alloc(cachep, flags, this_cpu); } #endif /* CONFIG_NUMA */ @@ -3338,6 +3463,7 @@ static __always_inline void * __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) { unsigned long save_flags; + int this_cpu; void *objp; lockdep_trace_alloc(flags); @@ -3346,9 +3472,9 @@ __cache_alloc(struct kmem_cache *cachep, return NULL; cache_alloc_debugcheck_before(cachep, flags); - local_irq_save(save_flags); - objp = __do_cache_alloc(cachep, flags); - local_irq_restore(save_flags); + slab_irq_save(save_flags, this_cpu); + objp = __do_cache_alloc(cachep, flags, &this_cpu); + slab_irq_restore(save_flags, this_cpu); objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); prefetchw(objp); @@ -3365,7 +3491,7 @@ __cache_alloc(struct kmem_cache *cachep, * Caller needs to acquire correct kmem_list's list_lock */ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, - int node) + int node, int *this_cpu) { int i; struct kmem_list3 *l3; @@ -3394,7 +3520,7 @@ static void free_block(struct kmem_cache * a different cache, refer to comments before * alloc_slabmgmt. */ - slab_destroy(cachep, slabp); + slab_destroy(cachep, slabp, this_cpu); } else { list_add(&slabp->list, &l3->slabs_free); } @@ -3408,11 +3534,12 @@ static void free_block(struct kmem_cache } } -static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) +static void +cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac, int *this_cpu) { int batchcount; struct kmem_list3 *l3; - int node = numa_node_id(); + int node = cpu_to_node(*this_cpu); batchcount = ac->batchcount; #if DEBUG @@ -3434,7 +3561,7 @@ static void cache_flusharray(struct kmem } } - free_block(cachep, ac->entry, batchcount, node); + free_block(cachep, ac->entry, batchcount, node, this_cpu); free_done: #if STATS { @@ -3463,9 +3590,9 @@ free_done: * Release an obj back to its cache. If the obj has a constructed state, it must * be in this state _before_ it is released. Called with disabled ints. 
*/ -static inline void __cache_free(struct kmem_cache *cachep, void *objp) +static void __cache_free(struct kmem_cache *cachep, void *objp, int *this_cpu) { - struct array_cache *ac = cpu_cache_get(cachep); + struct array_cache *ac = cpu_cache_get(cachep, *this_cpu); check_irq_off(); objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); @@ -3479,7 +3606,7 @@ static inline void __cache_free(struct k * variable to skip the call, which is mostly likely to be present in * the cache. */ - if (numa_platform && cache_free_alien(cachep, objp)) + if (numa_platform && cache_free_alien(cachep, objp, this_cpu)) return; if (likely(ac->avail < ac->limit)) { @@ -3488,7 +3615,7 @@ static inline void __cache_free(struct k return; } else { STATS_INC_FREEMISS(cachep); - cache_flusharray(cachep, ac); + cache_flusharray(cachep, ac, this_cpu); ac->entry[ac->avail++] = objp; } } @@ -3689,13 +3816,14 @@ EXPORT_SYMBOL(__kmalloc); void kmem_cache_free(struct kmem_cache *cachep, void *objp) { unsigned long flags; + int this_cpu; - local_irq_save(flags); + slab_irq_save(flags, this_cpu); debug_check_no_locks_freed(objp, obj_size(cachep)); if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) debug_check_no_obj_freed(objp, obj_size(cachep)); - __cache_free(cachep, objp); - local_irq_restore(flags); + __cache_free(cachep, objp, &this_cpu); + slab_irq_restore(flags, this_cpu); kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp); } @@ -3714,16 +3842,17 @@ void kfree(const void *objp) { struct kmem_cache *c; unsigned long flags; + int this_cpu; if (unlikely(ZERO_OR_NULL_PTR(objp))) return; - local_irq_save(flags); + slab_irq_save(flags, this_cpu); kfree_debugcheck(objp); c = virt_to_cache(objp); debug_check_no_locks_freed(objp, obj_size(c)); debug_check_no_obj_freed(objp, obj_size(c)); - __cache_free(c, (void *)objp); - local_irq_restore(flags); + __cache_free(c, (void *)objp, &this_cpu); + slab_irq_restore(flags, this_cpu); kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp); } @@ -3746,7 +3875,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name); */ static int alloc_kmemlist(struct kmem_cache *cachep) { - int node; + int node, this_cpu; struct kmem_list3 *l3; struct array_cache *new_shared; struct array_cache **new_alien = NULL; @@ -3774,11 +3903,11 @@ static int alloc_kmemlist(struct kmem_ca if (l3) { struct array_cache *shared = l3->shared; - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); if (shared) free_block(cachep, shared->entry, - shared->avail, node); + shared->avail, node, &this_cpu); l3->shared = new_shared; if (!l3->alien) { @@ -3787,7 +3916,7 @@ static int alloc_kmemlist(struct kmem_ca } l3->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); kfree(shared); free_alien_cache(new_alien); continue; @@ -3834,42 +3963,50 @@ struct ccupdate_struct { struct array_cache *new[NR_CPUS]; }; -static void do_ccupdate_local(void *info) +static void __do_ccupdate_local(void *info, int this_cpu) { struct ccupdate_struct *new = info; struct array_cache *old; check_irq_off(); - old = cpu_cache_get(new->cachep); + old = cpu_cache_get(new->cachep, this_cpu); - new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; - new->new[smp_processor_id()] = old; + new->cachep->array[this_cpu] = new->new[this_cpu]; + new->new[this_cpu] = old; +} + +#ifdef CONFIG_PREEMPT_RT +static void do_ccupdate_local(void *arg, int this_cpu) +{ + __do_ccupdate_local(arg, this_cpu); +} +#else 
+static void do_ccupdate_local(void *arg) +{ + __do_ccupdate_local(arg, smp_processor_id()); } +#endif /* Always called with the cache_chain_mutex held */ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, int shared) { - struct ccupdate_struct *new; - int i; - - new = kzalloc(sizeof(*new), GFP_KERNEL); - if (!new) - return -ENOMEM; + struct ccupdate_struct new; + int i, this_cpu; + memset(&new.new, 0, sizeof(new.new)); for_each_online_cpu(i) { - new->new[i] = alloc_arraycache(cpu_to_node(i), limit, + new.new[i] = alloc_arraycache(cpu_to_node(i), limit, batchcount); - if (!new->new[i]) { + if (!new.new[i]) { for (i--; i >= 0; i--) - kfree(new->new[i]); - kfree(new); + kfree(new.new[i]); return -ENOMEM; } } - new->cachep = cachep; + new.cachep = cachep; - on_each_cpu(do_ccupdate_local, (void *)new, 1); + slab_on_each_cpu(do_ccupdate_local, (void *)&new); check_irq_on(); cachep->batchcount = batchcount; @@ -3877,15 +4014,15 @@ static int do_tune_cpucache(struct kmem_ cachep->shared = shared; for_each_online_cpu(i) { - struct array_cache *ccold = new->new[i]; + struct array_cache *ccold = new.new[i]; if (!ccold) continue; - spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); - free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); - spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); + slab_spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock, this_cpu); + free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i), &this_cpu); + slab_spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock, this_cpu); kfree(ccold); } - kfree(new); + return alloc_kmemlist(cachep); } @@ -3949,26 +4086,26 @@ static int enable_cpucache(struct kmem_c * if drain_array() is used on the shared array. */ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, - struct array_cache *ac, int force, int node) + struct array_cache *ac, int force, int node) { - int tofree; + int tofree, this_cpu; if (!ac || !ac->avail) return; if (ac->touched && !force) { ac->touched = 0; } else { - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); if (ac->avail) { tofree = force ? ac->avail : (ac->limit + 4) / 5; if (tofree > ac->avail) tofree = (ac->avail + 1) / 2; - free_block(cachep, ac->entry, tofree, node); + free_block(cachep, ac->entry, tofree, node, &this_cpu); ac->avail -= tofree; memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); } - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); } } @@ -3986,11 +4123,12 @@ void drain_array(struct kmem_cache *cach */ static void cache_reap(struct work_struct *w) { + int this_cpu = raw_smp_processor_id(), node = cpu_to_node(this_cpu); struct kmem_cache *searchp; struct kmem_list3 *l3; - int node = numa_node_id(); struct delayed_work *work = container_of(w, struct delayed_work, work); + int work_done = 0; if (!mutex_trylock(&cache_chain_mutex)) /* Give up. Setup the next iteration. 
*/ @@ -4006,9 +4144,10 @@ static void cache_reap(struct work_struc */ l3 = searchp->nodelists[node]; - reap_alien(searchp, l3); + reap_alien(searchp, l3, &this_cpu); - drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); + drain_array(searchp, l3, cpu_cache_get(searchp, this_cpu), + 0, node); /* * These are racy checks but it does not matter @@ -4097,7 +4236,7 @@ static int s_show(struct seq_file *m, vo unsigned long num_slabs, free_objects = 0, shared_avail = 0; const char *name; char *error = NULL; - int node; + int this_cpu, node; struct kmem_list3 *l3; active_objs = 0; @@ -4108,7 +4247,7 @@ static int s_show(struct seq_file *m, vo continue; check_irq_on(); - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); list_for_each_entry(slabp, &l3->slabs_full, list) { if (slabp->inuse != cachep->num && !error) @@ -4133,7 +4272,7 @@ static int s_show(struct seq_file *m, vo if (l3->shared) shared_avail += l3->shared->avail; - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); } num_slabs += active_slabs; num_objs = num_slabs * cachep->num; @@ -4342,7 +4481,7 @@ static int leaks_show(struct seq_file *m struct kmem_list3 *l3; const char *name; unsigned long *n = m->private; - int node; + int node, this_cpu; int i; if (!(cachep->flags & SLAB_STORE_USER)) @@ -4360,13 +4499,13 @@ static int leaks_show(struct seq_file *m continue; check_irq_on(); - spin_lock_irq(&l3->list_lock); + slab_spin_lock_irq(&l3->list_lock, this_cpu); list_for_each_entry(slabp, &l3->slabs_full, list) handle_slab(n, cachep, slabp); list_for_each_entry(slabp, &l3->slabs_partial, list) handle_slab(n, cachep, slabp); - spin_unlock_irq(&l3->list_lock); + slab_spin_unlock_irq(&l3->list_lock, this_cpu); } name = cachep->name; if (n[0] == n[1]) { patches/rt-page_alloc.patch0000664000076400007640000001261311156214146014773 0ustar tglxtglxSubject: rt-friendly per-cpu pages From: Ingo Molnar rt-friendly per-cpu pages: convert the irqs-off per-cpu locking method into a preemptible, explicit-per-cpu-locks method. 
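In outline: on PREEMPT_RT the per-CPU page lists are protected by the new per-CPU pcp_locks spinlocks and callers carry an explicit CPU number, while on !PREEMPT_RT the same helpers still compile down to local_irq_save()/restore(). An illustrative sketch (not part of the patch; the caller below is hypothetical and would have to live in mm/page_alloc.c next to the static helpers it uses):

	static void example_flush_local_pcp(struct zone *zone)
	{
		struct per_cpu_pageset *pset;
		unsigned long flags;
		int this_cpu;

		/* RT: lock this CPU's pcp_locks entry; !RT: local_irq_save() */
		pset = get_zone_pcp(zone, &flags, &this_cpu);

		free_pages_bulk(zone, pset->pcp.count, &pset->pcp.list, 0);
		pset->pcp.count = 0;

		/* RT: drop the per-CPU lock; !RT: local_irq_restore() */
		put_zone_pcp(zone, flags, this_cpu);
	}
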
Signed-off-by: Ingo Molnar --- mm/page_alloc.c | 100 ++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 75 insertions(+), 25 deletions(-) Index: linux-2.6-tip/mm/page_alloc.c =================================================================== --- linux-2.6-tip.orig/mm/page_alloc.c +++ linux-2.6-tip/mm/page_alloc.c @@ -163,6 +163,53 @@ static unsigned long __meminitdata dma_r EXPORT_SYMBOL(movable_zone); #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ +#ifdef CONFIG_PREEMPT_RT +static DEFINE_PER_CPU_LOCKED(int, pcp_locks); +#endif + +static inline void __lock_cpu_pcp(unsigned long *flags, int cpu) +{ +#ifdef CONFIG_PREEMPT_RT + spin_lock(&__get_cpu_lock(pcp_locks, cpu)); + flags = 0; +#else + local_irq_save(*flags); +#endif +} + +static inline void lock_cpu_pcp(unsigned long *flags, int *this_cpu) +{ +#ifdef CONFIG_PREEMPT_RT + (void)get_cpu_var_locked(pcp_locks, this_cpu); + flags = 0; +#else + local_irq_save(*flags); + *this_cpu = smp_processor_id(); +#endif +} + +static inline void unlock_cpu_pcp(unsigned long flags, int this_cpu) +{ +#ifdef CONFIG_PREEMPT_RT + put_cpu_var_locked(pcp_locks, this_cpu); +#else + local_irq_restore(flags); +#endif +} + +static struct per_cpu_pageset * +get_zone_pcp(struct zone *zone, unsigned long *flags, int *this_cpu) +{ + lock_cpu_pcp(flags, this_cpu); + return zone_pcp(zone, *this_cpu); +} + +static void +put_zone_pcp(struct zone *zone, unsigned long flags, int this_cpu) +{ + unlock_cpu_pcp(flags, this_cpu); +} + #if MAX_NUMNODES > 1 int nr_node_ids __read_mostly = MAX_NUMNODES; EXPORT_SYMBOL(nr_node_ids); @@ -547,8 +594,7 @@ static void free_one_page(struct zone *z static void __free_pages_ok(struct page *page, unsigned int order) { unsigned long flags; - int i; - int bad = 0; + int i, this_cpu, bad = 0; kmemcheck_free_shadow(page, order); @@ -565,10 +611,10 @@ static void __free_pages_ok(struct page arch_free_page(page, order); kernel_map_pages(page, 1 << order, 0); - local_irq_save(flags); - __count_vm_events(PGFREE, 1 << order); + lock_cpu_pcp(&flags, &this_cpu); + count_vm_events(PGFREE, 1 << order); free_one_page(page_zone(page), page, order); - local_irq_restore(flags); + unlock_cpu_pcp(flags, this_cpu); } /* @@ -901,15 +947,16 @@ void drain_zone_pages(struct zone *zone, { unsigned long flags; int to_drain; + int this_cpu; - local_irq_save(flags); + lock_cpu_pcp(&flags, &this_cpu); if (pcp->count >= pcp->batch) to_drain = pcp->batch; else to_drain = pcp->count; free_pages_bulk(zone, to_drain, &pcp->list, 0); pcp->count -= to_drain; - local_irq_restore(flags); + unlock_cpu_pcp(flags, this_cpu); } #endif @@ -933,12 +980,15 @@ static void drain_pages(unsigned int cpu continue; pset = zone_pcp(zone, cpu); - + if (!pset) { + WARN_ON(1); + continue; + } pcp = &pset->pcp; - local_irq_save(flags); + lock_cpu_pcp(&flags, &cpu); free_pages_bulk(zone, pcp->count, &pcp->list, 0); pcp->count = 0; - local_irq_restore(flags); + unlock_cpu_pcp(flags, cpu); } } @@ -1000,8 +1050,10 @@ void mark_free_pages(struct zone *zone) static void free_hot_cold_page(struct page *page, int cold) { struct zone *zone = page_zone(page); + struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; unsigned long flags; + int this_cpu; kmemcheck_free_shadow(page, 0); @@ -1017,9 +1069,11 @@ static void free_hot_cold_page(struct pa arch_free_page(page, 0); kernel_map_pages(page, 1, 0); - pcp = &zone_pcp(zone, get_cpu())->pcp; - local_irq_save(flags); - __count_vm_event(PGFREE); + pset = get_zone_pcp(zone, &flags, &this_cpu); + pcp = &pset->pcp; + + 
count_vm_event(PGFREE); + if (cold) list_add_tail(&page->lru, &pcp->list); else @@ -1030,8 +1084,7 @@ static void free_hot_cold_page(struct pa free_pages_bulk(zone, pcp->batch, &pcp->list, 0); pcp->count -= pcp->batch; } - local_irq_restore(flags); - put_cpu(); + put_zone_pcp(zone, flags, this_cpu); } void free_hot_page(struct page *page) @@ -1083,16 +1136,15 @@ static struct page *buffered_rmqueue(str unsigned long flags; struct page *page; int cold = !!(gfp_flags & __GFP_COLD); - int cpu; + struct per_cpu_pageset *pset; int migratetype = allocflags_to_migratetype(gfp_flags); + int this_cpu; again: - cpu = get_cpu(); + pset = get_zone_pcp(zone, &flags, &this_cpu); if (likely(order == 0)) { - struct per_cpu_pages *pcp; + struct per_cpu_pages *pcp = &pset->pcp; - pcp = &zone_pcp(zone, cpu)->pcp; - local_irq_save(flags); if (!pcp->count) { pcp->count = rmqueue_bulk(zone, 0, pcp->batch, &pcp->list, migratetype); @@ -1121,7 +1173,7 @@ again: list_del(&page->lru); pcp->count--; } else { - spin_lock_irqsave(&zone->lock, flags); + spin_lock(&zone->lock); page = __rmqueue(zone, order, migratetype); spin_unlock(&zone->lock); if (!page) @@ -1130,8 +1182,7 @@ again: __count_zone_vm_events(PGALLOC, zone, 1 << order); zone_statistics(preferred_zone, zone); - local_irq_restore(flags); - put_cpu(); + put_zone_pcp(zone, flags, this_cpu); VM_BUG_ON(bad_range(zone, page)); if (prep_new_page(page, order, gfp_flags)) @@ -1139,8 +1190,7 @@ again: return page; failed: - local_irq_restore(flags); - put_cpu(); + put_zone_pcp(zone, flags, this_cpu); return NULL; } patches/rt-mutex-preempt-debugging.patch0000664000076400007640000001275511160544612017460 0ustar tglxtglxSubject: rt: mutex preempt debugging From: Ingo Molnar Date: Wed Feb 04 00:03:06 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/preempt.h | 18 +++++++++++++++--- include/linux/smp.h | 2 +- init/main.c | 2 +- kernel/sched.c | 24 ++++++++++++++++++++++-- kernel/softirq.c | 6 +++--- 5 files changed, 42 insertions(+), 10 deletions(-) Index: linux-2.6-tip/include/linux/preempt.h =================================================================== --- linux-2.6-tip.orig/include/linux/preempt.h +++ linux-2.6-tip/include/linux/preempt.h @@ -9,6 +9,7 @@ #include #include #include +#include #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) extern void add_preempt_count(int val); @@ -21,11 +22,12 @@ #define inc_preempt_count() add_preempt_count(1) #define dec_preempt_count() sub_preempt_count(1) -#define preempt_count() (current_thread_info()->preempt_count) +#define preempt_count() (current_thread_info()->preempt_count) #ifdef CONFIG_PREEMPT asmlinkage void preempt_schedule(void); +asmlinkage void preempt_schedule_irq(void); #define preempt_disable() \ do { \ @@ -33,12 +35,19 @@ do { \ barrier(); \ } while (0) -#define preempt_enable_no_resched() \ +#define __preempt_enable_no_resched() \ do { \ barrier(); \ dec_preempt_count(); \ } while (0) + +#ifdef CONFIG_DEBUG_PREEMPT +extern void notrace preempt_enable_no_resched(void); +#else +# define preempt_enable_no_resched() __preempt_enable_no_resched() +#endif + #define preempt_check_resched() \ do { \ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ @@ -47,7 +56,7 @@ do { \ #define preempt_enable() \ do { \ - preempt_enable_no_resched(); \ + __preempt_enable_no_resched(); \ barrier(); \ preempt_check_resched(); \ } while (0) @@ -84,6 +93,7 @@ do { \ #define preempt_disable() do { } while (0) #define preempt_enable_no_resched() do { } while (0) +#define __preempt_enable_no_resched() do { 
} while (0) #define preempt_enable() do { } while (0) #define preempt_check_resched() do { } while (0) @@ -91,6 +101,8 @@ do { \ #define preempt_enable_no_resched_notrace() do { } while (0) #define preempt_enable_notrace() do { } while (0) +#define preempt_schedule_irq() do { } while (0) + #endif #ifdef CONFIG_PREEMPT_NOTIFIERS Index: linux-2.6-tip/include/linux/smp.h =================================================================== --- linux-2.6-tip.orig/include/linux/smp.h +++ linux-2.6-tip/include/linux/smp.h @@ -177,7 +177,7 @@ static inline void init_call_single_data #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) #define put_cpu() preempt_enable() -#define put_cpu_no_resched() preempt_enable_no_resched() +#define put_cpu_no_resched() __preempt_enable_no_resched() /* * Callback to arch code if there's nosmp or maxcpus=0 on the Index: linux-2.6-tip/init/main.c =================================================================== --- linux-2.6-tip.orig/init/main.c +++ linux-2.6-tip/init/main.c @@ -468,7 +468,7 @@ static noinline void __init_refok rest_i */ init_idle_bootup_task(current); rcu_scheduler_starting(); - preempt_enable_no_resched(); + __preempt_enable_no_resched(); schedule(); preempt_disable(); Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -2339,6 +2339,26 @@ static int sched_balance_self(int cpu, i #endif /* CONFIG_SMP */ +#ifdef CONFIG_DEBUG_PREEMPT +void notrace preempt_enable_no_resched(void) +{ + static int once = 1; + + barrier(); + dec_preempt_count(); + + if (once && !preempt_count()) { + once = 0; + printk(KERN_ERR "BUG: %s:%d task might have lost a preemption check!\n", + current->comm, current->pid); + dump_stack(); + } +} + +EXPORT_SYMBOL(preempt_enable_no_resched); +#endif + + /** * task_oncpu_function_call - call a function on the cpu on which a task runs * @p: the task to evaluate @@ -4844,7 +4864,7 @@ asmlinkage void __sched schedule(void) need_resched: preempt_disable(); __schedule(); - preempt_enable_no_resched(); + __preempt_enable_no_resched(); if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; } @@ -8860,7 +8880,7 @@ void __init sched_init(void) scheduler_running = 1; } -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP +#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) void __might_sleep(char *file, int line) { #ifdef in_atomic Index: linux-2.6-tip/kernel/softirq.c =================================================================== --- linux-2.6-tip.orig/kernel/softirq.c +++ linux-2.6-tip/kernel/softirq.c @@ -386,7 +386,7 @@ void irq_exit(void) if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) tick_nohz_stop_sched_tick(0); #endif - preempt_enable_no_resched(); + __preempt_enable_no_resched(); } /* @@ -723,7 +723,7 @@ static int ksoftirqd(void * __data) while (!kthread_should_stop()) { preempt_disable(); if (!(local_softirq_pending() & mask)) { - preempt_enable_no_resched(); + __preempt_enable_no_resched(); schedule(); preempt_disable(); } @@ -742,7 +742,7 @@ static int ksoftirqd(void * __data) goto wait_to_die; local_irq_disable(); - preempt_enable_no_resched(); + __preempt_enable_no_resched(); set_softirq_pending(local_softirq_pending() & ~mask); local_bh_disable(); local_irq_enable(); patches/rt-mutex-trivial-tcp-preempt-fix.patch0000664000076400007640000000147611150327144020543 0ustar tglxtglxSubject: rt: mutex trivial tcp preempt fix From: Ingo Molnar Date: 
Wed Feb 04 00:03:05 CET 2009 Signed-off-by: Ingo Molnar --- net/ipv4/tcp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux-2.6-tip/net/ipv4/tcp.c =================================================================== --- linux-2.6-tip.orig/net/ipv4/tcp.c +++ linux-2.6-tip/net/ipv4/tcp.c @@ -1322,11 +1322,11 @@ int tcp_recvmsg(struct kiocb *iocb, stru (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && !sysctl_tcp_low_latency && dma_find_channel(DMA_MEMCPY)) { - preempt_enable_no_resched(); + preempt_enable(); tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len); } else { - preempt_enable_no_resched(); + preempt_enable(); } } #endif patches/rt-mutex-trivial-route-cast-fix.patch0000664000076400007640000000123411150327144020361 0ustar tglxtglxSubject: rt: mutex trivial route cast fix From: Ingo Molnar Date: Wed Feb 04 00:03:05 CET 2009 Signed-off-by: Ingo Molnar --- net/ipv4/route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/net/ipv4/route.c =================================================================== --- linux-2.6-tip.orig/net/ipv4/route.c +++ linux-2.6-tip/net/ipv4/route.c @@ -242,7 +242,7 @@ static __init void rt_hash_lock_init(voi spin_lock_init(&rt_hash_locks[i]); } #else -# define rt_hash_lock_addr(slot) NULL +# define rt_hash_lock_addr(slot) ((spinlock_t *)NULL) static inline void rt_hash_lock_init(void) { patches/rt-mutex-i386.patch0000664000076400007640000003364011160527551014543 0ustar tglxtglxSubject: rt: mutex i386 From: Ingo Molnar Date: Tue Feb 03 23:55:11 CET 2009 Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 12 ++++++-- arch/x86/include/asm/rwsem.h | 51 +++++++++++++++++----------------- arch/x86/include/asm/spinlock.h | 46 +++++++++++++++--------------- arch/x86/include/asm/spinlock_types.h | 4 +- arch/x86/kernel/entry_32.S | 4 +- arch/x86/kernel/process_32.c | 2 - 6 files changed, 64 insertions(+), 55 deletions(-) Index: linux-2.6-tip/arch/x86/Kconfig =================================================================== --- linux-2.6-tip.orig/arch/x86/Kconfig +++ linux-2.6-tip/arch/x86/Kconfig @@ -115,10 +115,18 @@ config ARCH_MAY_HAVE_PC_FDC def_bool y config RWSEM_GENERIC_SPINLOCK - def_bool !X86_XADD + bool + depends on !X86_XADD || PREEMPT_RT + default y + +config ASM_SEMAPHORES + bool + default y config RWSEM_XCHGADD_ALGORITHM - def_bool X86_XADD + bool + depends on X86_XADD && !RWSEM_GENERIC_SPINLOCK + default y config ARCH_HAS_CPU_IDLE_WAIT def_bool y Index: linux-2.6-tip/arch/x86/include/asm/rwsem.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/rwsem.h +++ linux-2.6-tip/arch/x86/include/asm/rwsem.h @@ -44,14 +44,14 @@ struct rwsem_waiter; -extern asmregparm struct rw_semaphore * - rwsem_down_read_failed(struct rw_semaphore *sem); -extern asmregparm struct rw_semaphore * - rwsem_down_write_failed(struct rw_semaphore *sem); -extern asmregparm struct rw_semaphore * - rwsem_wake(struct rw_semaphore *); -extern asmregparm struct rw_semaphore * - rwsem_downgrade_wake(struct rw_semaphore *sem); +extern asmregparm struct compat_rw_semaphore * + rwsem_down_read_failed(struct compat_rw_semaphore *sem); +extern asmregparm struct compat_rw_semaphore * + rwsem_down_write_failed(struct compat_rw_semaphore *sem); +extern asmregparm struct compat_rw_semaphore * + rwsem_wake(struct compat_rw_semaphore *); +extern asmregparm struct compat_rw_semaphore * + rwsem_downgrade_wake(struct compat_rw_semaphore *sem); /* * the semaphore definition @@ 
-64,7 +64,7 @@ extern asmregparm struct rw_semaphore * #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) -struct rw_semaphore { +struct compat_rw_semaphore { signed long count; spinlock_t wait_lock; struct list_head wait_list; @@ -86,23 +86,23 @@ struct rw_semaphore { LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \ } -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) +#define COMPAT_DECLARE_RWSEM(name) \ + struct compat_rw_semaphore name = __RWSEM_INITIALIZER(name) -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, +extern void __compat_init_rwsem(struct compat_rw_semaphore *sem, const char *name, struct lock_class_key *key); -#define init_rwsem(sem) \ +#define compat_init_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ - __init_rwsem((sem), #sem, &__key); \ + __compat_init_rwsem((sem), #sem, &__key); \ } while (0) /* * lock for reading */ -static inline void __down_read(struct rw_semaphore *sem) +static inline void __down_read(struct compat_rw_semaphore *sem) { asm volatile("# beginning down_read\n\t" LOCK_PREFIX " incl (%%eax)\n\t" @@ -119,7 +119,7 @@ static inline void __down_read(struct rw /* * trylock for reading -- returns 1 if successful, 0 if contention */ -static inline int __down_read_trylock(struct rw_semaphore *sem) +static inline int __down_read_trylock(struct compat_rw_semaphore *sem) { __s32 result, tmp; asm volatile("# beginning __down_read_trylock\n\t" @@ -141,7 +141,8 @@ static inline int __down_read_trylock(st /* * lock for writing */ -static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) +static inline void +__down_write_nested(struct compat_rw_semaphore *sem, int subclass) { int tmp; @@ -160,7 +161,7 @@ static inline void __down_write_nested(s : "memory", "cc"); } -static inline void __down_write(struct rw_semaphore *sem) +static inline void __down_write(struct compat_rw_semaphore *sem) { __down_write_nested(sem, 0); } @@ -168,7 +169,7 @@ static inline void __down_write(struct r /* * trylock for writing -- returns 1 if successful, 0 if contention */ -static inline int __down_write_trylock(struct rw_semaphore *sem) +static inline int __down_write_trylock(struct compat_rw_semaphore *sem) { signed long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, @@ -181,7 +182,7 @@ static inline int __down_write_trylock(s /* * unlock after reading */ -static inline void __up_read(struct rw_semaphore *sem) +static inline void __up_read(struct compat_rw_semaphore *sem) { __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; asm volatile("# beginning __up_read\n\t" @@ -199,7 +200,7 @@ static inline void __up_read(struct rw_s /* * unlock after writing */ -static inline void __up_write(struct rw_semaphore *sem) +static inline void __up_write(struct compat_rw_semaphore *sem) { asm volatile("# beginning __up_write\n\t" " movl %2,%%edx\n\t" @@ -218,7 +219,7 @@ static inline void __up_write(struct rw_ /* * downgrade write lock to read lock */ -static inline void __downgrade_write(struct rw_semaphore *sem) +static inline void __downgrade_write(struct compat_rw_semaphore *sem) { asm volatile("# beginning __downgrade_write\n\t" LOCK_PREFIX " addl %2,(%%eax)\n\t" @@ -235,7 +236,7 @@ static inline void __downgrade_write(str /* * implement atomic add functionality */ -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +static inline void rwsem_atomic_add(int delta, struct compat_rw_semaphore *sem) { asm 
volatile(LOCK_PREFIX "addl %1,%0" : "+m" (sem->count) @@ -245,7 +246,7 @@ static inline void rwsem_atomic_add(int /* * implement exchange and add functionality */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +static inline int rwsem_atomic_update(int delta, struct compat_rw_semaphore *sem) { int tmp = delta; @@ -256,7 +257,7 @@ static inline int rwsem_atomic_update(in return tmp + delta; } -static inline int rwsem_is_locked(struct rw_semaphore *sem) +static inline int rwsem_is_locked(struct compat_rw_semaphore *sem) { return (sem->count != 0); } Index: linux-2.6-tip/arch/x86/include/asm/spinlock.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/spinlock.h +++ linux-2.6-tip/arch/x86/include/asm/spinlock.h @@ -58,7 +58,7 @@ #if (NR_CPUS < 256) #define TICKET_SHIFT 8 -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(__raw_spinlock_t *lock) { short inc = 0x0100; @@ -77,7 +77,7 @@ static __always_inline void __ticket_spi : "memory", "cc"); } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(__raw_spinlock_t *lock) { int tmp, new; @@ -96,7 +96,7 @@ static __always_inline int __ticket_spin return tmp; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(__raw_spinlock_t *lock) { asm volatile(UNLOCK_LOCK_PREFIX "incb %0" : "+m" (lock->slock) @@ -106,7 +106,7 @@ static __always_inline void __ticket_spi #else #define TICKET_SHIFT 16 -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(__raw_spinlock_t *lock) { int inc = 0x00010000; int tmp; @@ -127,7 +127,7 @@ static __always_inline void __ticket_spi : "memory", "cc"); } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(__raw_spinlock_t *lock) { int tmp; int new; @@ -149,7 +149,7 @@ static __always_inline int __ticket_spin return tmp; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(__raw_spinlock_t *lock) { asm volatile(UNLOCK_LOCK_PREFIX "incw %0" : "+m" (lock->slock) @@ -158,14 +158,14 @@ static __always_inline void __ticket_spi } #endif -static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) +static inline int __ticket_spin_is_locked(__raw_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1)); } -static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) +static inline int __ticket_spin_is_contended(__raw_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); @@ -174,33 +174,33 @@ static inline int __ticket_spin_is_conte #ifndef CONFIG_PARAVIRT -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int __raw_spin_is_locked(__raw_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int __raw_spin_is_contended(__raw_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } #define __raw_spin_is_contended __raw_spin_is_contended -static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_lock(__raw_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int 
__raw_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __raw_spin_trylock(__raw_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_unlock(__raw_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, +static __always_inline void __raw_spin_lock_flags(__raw_spinlock_t *lock, unsigned long flags) { __raw_spin_lock(lock); @@ -208,7 +208,7 @@ static __always_inline void __raw_spin_l #endif -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_wait(__raw_spinlock_t *lock) { while (__raw_spin_is_locked(lock)) cpu_relax(); @@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wai * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static inline int __raw_read_can_lock(raw_rwlock_t *lock) +static inline int __raw_read_can_lock(__raw_rwlock_t *lock) { return (int)(lock)->lock > 0; } @@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(ra * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -static inline int __raw_write_can_lock(raw_rwlock_t *lock) +static inline int __raw_write_can_lock(__raw_rwlock_t *lock) { return (lock)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(__raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" "jns 1f\n" @@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_r ::LOCK_PTR_REG (rw) : "memory"); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(__raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" "jz 1f\n" @@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_ ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t *lock) +static inline int __raw_read_trylock(__raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw return 0; } -static inline int __raw_write_trylock(raw_rwlock_t *lock) +static inline int __raw_write_trylock(__raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -284,12 +284,12 @@ static inline int __raw_write_trylock(ra return 0; } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(__raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(__raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX "addl %1, %0" : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); Index: linux-2.6-tip/arch/x86/include/asm/spinlock_types.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/spinlock_types.h +++ linux-2.6-tip/arch/x86/include/asm/spinlock_types.h @@ -7,13 +7,13 @@ typedef struct raw_spinlock { unsigned int slock; -} raw_spinlock_t; +} __raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } typedef struct { unsigned int lock; -} raw_rwlock_t; +} __raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } Index: linux-2.6-tip/arch/x86/kernel/entry_32.S =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/entry_32.S +++ linux-2.6-tip/arch/x86/kernel/entry_32.S @@ -613,7 +613,7 @@ ENDPROC(system_call) ALIGN 
RING0_PTREGS_FRAME # can't unwind into user space anyway work_pending: - testb $_TIF_NEED_RESCHED, %cl + testl $(_TIF_NEED_RESCHED), %ecx jz work_notifysig work_resched: call schedule @@ -626,7 +626,7 @@ work_resched: andl $_TIF_WORK_MASK, %ecx # is there any work to be done other # than syscall tracing? jz restore_all - testb $_TIF_NEED_RESCHED, %cl + testl $(_TIF_NEED_RESCHED), %ecx jnz work_resched work_notifysig: # deal with pending signals and Index: linux-2.6-tip/arch/x86/kernel/process_32.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/process_32.c +++ linux-2.6-tip/arch/x86/kernel/process_32.c @@ -121,7 +121,7 @@ void cpu_idle(void) start_critical_timings(); } tick_nohz_restart_sched_tick(); - preempt_enable_no_resched(); + __preempt_enable_no_resched(); schedule(); preempt_disable(); } patches/rt-mutex-x86-64.patch0000664000076400007640000000604311160527551014723 0ustar tglxtglxSubject: rt: mutex x86 64 From: Ingo Molnar Date: Wed Feb 04 00:03:04 CET 2009 Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- arch/x86/kernel/dumpstack.c | 8 ++++---- arch/x86/kernel/tsc_sync.c | 2 +- arch/x86/kernel/vsyscall_64.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) Index: linux-2.6-tip/arch/x86/Kconfig =================================================================== --- linux-2.6-tip.orig/arch/x86/Kconfig +++ linux-2.6-tip/arch/x86/Kconfig @@ -125,7 +125,7 @@ config ASM_SEMAPHORES config RWSEM_XCHGADD_ALGORITHM bool - depends on X86_XADD && !RWSEM_GENERIC_SPINLOCK + depends on X86_XADD && !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT default y config ARCH_HAS_CPU_IDLE_WAIT Index: linux-2.6-tip/arch/x86/kernel/dumpstack.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/dumpstack.c +++ linux-2.6-tip/arch/x86/kernel/dumpstack.c @@ -188,7 +188,7 @@ void dump_stack(void) } EXPORT_SYMBOL(dump_stack); -static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; +static raw_spinlock_t die_lock = RAW_SPIN_LOCK_UNLOCKED(die_lock); static int die_owner = -1; static unsigned int die_nest_count; @@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void) /* racy, but better than risking deadlock. */ raw_local_irq_save(flags); cpu = smp_processor_id(); - if (!__raw_spin_trylock(&die_lock)) { + if (!spin_trylock(&die_lock)) { if (cpu == die_owner) /* nested oops. should stop eventually */; else - __raw_spin_lock(&die_lock); + spin_lock(&die_lock); } die_nest_count++; die_owner = cpu; @@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long fl die_nest_count--; if (!die_nest_count) /* Nest count reaches zero, release the lock. 
*/ - __raw_spin_unlock(&die_lock); + spin_unlock(&die_lock); raw_local_irq_restore(flags); oops_exit(); Index: linux-2.6-tip/arch/x86/kernel/tsc_sync.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/tsc_sync.c +++ linux-2.6-tip/arch/x86/kernel/tsc_sync.c @@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count * we want to have the fastest, inlined, non-debug version * of a critical section, to be able to prove TSC time-warps: */ -static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; +static __cpuinitdata __raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; static __cpuinitdata cycles_t last_tsc; static __cpuinitdata cycles_t max_warp; static __cpuinitdata int nr_warps; Index: linux-2.6-tip/arch/x86/kernel/vsyscall_64.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/vsyscall_64.c +++ linux-2.6-tip/arch/x86/kernel/vsyscall_64.c @@ -59,7 +59,7 @@ int __vgetcpu_mode __section_vgetcpu_mod struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data = { - .lock = SEQLOCK_UNLOCKED, + .lock = __RAW_SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), .sysctl_enabled = 1, }; patches/rt-mutex-core.patch0000664000076400007640000053461011160544611015001 0ustar tglxtglxSubject: rt: mutex core From: Ingo Molnar Date: Tue Feb 03 23:55:27 CET 2009 Signed-off-by: Ingo Molnar --- drivers/input/ff-memless.c | 1 fs/proc/array.c | 27 + include/linux/bit_spinlock.h | 4 include/linux/init_task.h | 3 include/linux/mutex.h | 63 +++ include/linux/pickop.h | 32 + include/linux/plist.h | 4 include/linux/rt_lock.h | 286 +++++++++++++++ include/linux/rtmutex.h | 6 include/linux/rwsem-spinlock.h | 35 - include/linux/rwsem.h | 108 ++++- include/linux/sched.h | 80 +++- include/linux/semaphore.h | 77 +++- include/linux/seqlock.h | 270 ++++++++++++-- include/linux/spinlock.h | 686 +++++++++++++++++++++++++------------- include/linux/spinlock_api_smp.h | 91 ++--- include/linux/spinlock_api_up.h | 74 ++-- include/linux/spinlock_types.h | 61 ++- include/linux/spinlock_types_up.h | 6 include/linux/spinlock_up.h | 8 kernel/Makefile | 6 kernel/fork.c | 10 kernel/futex.c | 4 kernel/lockdep.c | 2 kernel/rt.c | 634 +++++++++++++++++++++++++++++++++++ kernel/rtmutex-debug.c | 108 ++--- kernel/rtmutex.c | 450 ++++++++++++++++++++++-- kernel/rwsem.c | 44 +- kernel/sched.c | 66 ++- kernel/sched_clock.c | 4 kernel/semaphore.c | 46 +- kernel/spinlock.c | 278 +++++++++------ lib/dec_and_lock.c | 4 lib/kernel_lock.c | 4 lib/locking-selftest.c | 6 lib/plist.c | 2 lib/rwsem-spinlock.c | 29 - lib/rwsem.c | 6 lib/spinlock_debug.c | 64 +-- 39 files changed, 2920 insertions(+), 769 deletions(-) Index: linux-2.6-tip/drivers/input/ff-memless.c =================================================================== --- linux-2.6-tip.orig/drivers/input/ff-memless.c +++ linux-2.6-tip/drivers/input/ff-memless.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include Index: linux-2.6-tip/fs/proc/array.c =================================================================== --- linux-2.6-tip.orig/fs/proc/array.c +++ linux-2.6-tip/fs/proc/array.c @@ -133,12 +133,13 @@ static inline void task_name(struct seq_ */ static const char *task_state_array[] = { "R (running)", /* 0 */ - "S (sleeping)", /* 1 */ - "D (disk sleep)", /* 2 */ - "T (stopped)", /* 4 */ - "T (tracing stop)", /* 8 */ - "Z (zombie)", /* 16 */ - "X (dead)" /* 32 */ + "M (running-mutex)", /* 1 */ + "S (sleeping)", /* 2 */ + "D (disk 
sleep)", /* 4 */ + "T (stopped)", /* 8 */ + "T (tracing stop)", /* 16 */ + "Z (zombie)", /* 32 */ + "X (dead)" /* 64 */ }; static inline const char *get_task_state(struct task_struct *tsk) @@ -320,6 +321,19 @@ static inline void task_context_switch_c p->nivcsw); } +#define get_blocked_on(t) (-1) + +static inline void show_blocked_on(struct seq_file *m, struct task_struct *p) +{ + pid_t pid = get_blocked_on(p); + + if (pid < 0) + return; + + seq_printf(m, "BlckOn: %d\n", pid); +} + + int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { @@ -339,6 +353,7 @@ int proc_pid_status(struct seq_file *m, task_show_regs(m, task); #endif task_context_switch_counts(m, task); + show_blocked_on(m, task); return 0; } Index: linux-2.6-tip/include/linux/bit_spinlock.h =================================================================== --- linux-2.6-tip.orig/include/linux/bit_spinlock.h +++ linux-2.6-tip/include/linux/bit_spinlock.h @@ -1,6 +1,8 @@ #ifndef __LINUX_BIT_SPINLOCK_H #define __LINUX_BIT_SPINLOCK_H +#if 0 + /* * bit-based spin_lock() * @@ -91,5 +93,7 @@ static inline int bit_spin_is_locked(int #endif } +#endif + #endif /* __LINUX_BIT_SPINLOCK_H */ Index: linux-2.6-tip/include/linux/init_task.h =================================================================== --- linux-2.6-tip.orig/include/linux/init_task.h +++ linux-2.6-tip/include/linux/init_task.h @@ -10,6 +10,7 @@ #include #include #include +#include extern struct files_struct init_files; extern struct fs_struct init_fs; @@ -184,8 +185,8 @@ extern struct cred init_cred; .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ - .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ .timer_slack_ns = 50000, /* 50 usec default slack */ \ + .pi_lock = RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ .pids = { \ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ Index: linux-2.6-tip/include/linux/mutex.h =================================================================== --- linux-2.6-tip.orig/include/linux/mutex.h +++ linux-2.6-tip/include/linux/mutex.h @@ -12,11 +12,73 @@ #include #include +#include #include #include #include +#ifdef CONFIG_PREEMPT_RT + +#include + +struct mutex { + struct rt_mutex lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __MUTEX_INITIALIZER(mutexname) \ + { \ + .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ + } + +#define DEFINE_MUTEX(mutexname) \ + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +extern void +_mutex_init(struct mutex *lock, char *name, struct lock_class_key *key); + +extern void __lockfunc _mutex_lock(struct mutex *lock); +extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); +extern int __lockfunc _mutex_lock_killable(struct mutex *lock); +extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); +extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); +extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); +extern int __lockfunc _mutex_trylock(struct mutex *lock); +extern void __lockfunc _mutex_unlock(struct mutex *lock); + +#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) +#define mutex_lock(l) _mutex_lock(l) +#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) +#define mutex_lock_killable(l) _mutex_lock_killable(l) +#define mutex_trylock(l) _mutex_trylock(l) +#define mutex_unlock(l) _mutex_unlock(l) 
+#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) +# define mutex_lock_interruptible_nested(l, s) \ + _mutex_lock_interruptible_nested(l, s) +# define mutex_lock_killable_nested(l, s) \ + _mutex_lock_killable_nested(l, s) +#else +# define mutex_lock_nested(l, s) _mutex_lock(l) +# define mutex_lock_interruptible_nested(l, s) \ + _mutex_lock_interruptible(l) +# define mutex_lock_killable_nested(l, s) \ + _mutex_lock_killable(l) +#endif + +# define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + _mutex_init((mutex), #mutex, &__key); \ +} while (0) + +#else /* * Simple, straightforward mutexes with strict semantics: * @@ -152,3 +214,4 @@ extern int mutex_trylock(struct mutex *l extern void mutex_unlock(struct mutex *lock); #endif +#endif Index: linux-2.6-tip/include/linux/pickop.h =================================================================== --- /dev/null +++ linux-2.6-tip/include/linux/pickop.h @@ -0,0 +1,32 @@ +#ifndef _LINUX_PICKOP_H +#define _LINUX_PICKOP_H + +#undef PICK_TYPE_EQUAL +#define PICK_TYPE_EQUAL(var, type) \ + __builtin_types_compatible_p(typeof(var), type) + +extern int __bad_func_type(void); + +#define PICK_FUNCTION(type1, type2, func1, func2, arg0, ...) \ +do { \ + if (PICK_TYPE_EQUAL((arg0), type1)) \ + func1((type1)(arg0), ##__VA_ARGS__); \ + else if (PICK_TYPE_EQUAL((arg0), type2)) \ + func2((type2)(arg0), ##__VA_ARGS__); \ + else __bad_func_type(); \ +} while (0) + +#define PICK_FUNCTION_RET(type1, type2, func1, func2, arg0, ...) \ +({ \ + unsigned long __ret; \ + \ + if (PICK_TYPE_EQUAL((arg0), type1)) \ + __ret = func1((type1)(arg0), ##__VA_ARGS__); \ + else if (PICK_TYPE_EQUAL((arg0), type2)) \ + __ret = func2((type2)(arg0), ##__VA_ARGS__); \ + else __ret = __bad_func_type(); \ + \ + __ret; \ +}) + +#endif /* _LINUX_PICKOP_H */ Index: linux-2.6-tip/include/linux/plist.h =================================================================== --- linux-2.6-tip.orig/include/linux/plist.h +++ linux-2.6-tip/include/linux/plist.h @@ -81,7 +81,7 @@ struct plist_head { struct list_head prio_list; struct list_head node_list; #ifdef CONFIG_DEBUG_PI_LIST - spinlock_t *lock; + raw_spinlock_t *lock; #endif }; @@ -128,7 +128,7 @@ struct plist_node { * @lock: list spinlock, remembered for debugging */ static inline void -plist_head_init(struct plist_head *head, spinlock_t *lock) +plist_head_init(struct plist_head *head, raw_spinlock_t *lock) { INIT_LIST_HEAD(&head->prio_list); INIT_LIST_HEAD(&head->node_list); Index: linux-2.6-tip/include/linux/rt_lock.h =================================================================== --- /dev/null +++ linux-2.6-tip/include/linux/rt_lock.h @@ -0,0 +1,286 @@ +#ifndef __LINUX_RT_LOCK_H +#define __LINUX_RT_LOCK_H + +/* + * Real-Time Preemption Support + * + * started by Ingo Molnar: + * + * Copyright (C) 2004, 2005 Red Hat, Inc., Ingo Molnar + * + * This file contains the main data structure definitions. 
+ */ +#include +#include +#include + +#ifdef CONFIG_PREEMPT_RT +/* + * spinlocks - an RT mutex plus lock-break field: + */ +typedef struct { + struct rt_mutex lock; + unsigned int break_lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} spinlock_t; + +#ifdef CONFIG_DEBUG_RT_MUTEXES +# define __SPIN_LOCK_UNLOCKED(name) \ + (spinlock_t) { { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) \ + , .save_state = 1, .file = __FILE__, .line = __LINE__ }, SPIN_DEP_MAP_INIT(name) } +#else +# define __SPIN_LOCK_UNLOCKED(name) \ + (spinlock_t) { { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) }, SPIN_DEP_MAP_INIT(name) } +#endif +# define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(spin_old_style) +#else /* !PREEMPT_RT */ + typedef raw_spinlock_t spinlock_t; +# ifdef CONFIG_DEBUG_SPINLOCK +# define _SPIN_LOCK_UNLOCKED \ + { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ + .magic = SPINLOCK_MAGIC, \ + .owner = SPINLOCK_OWNER_INIT, \ + .owner_cpu = -1 } +# else +# define _SPIN_LOCK_UNLOCKED \ + { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } +# endif +# define SPIN_LOCK_UNLOCKED _SPIN_LOCK_UNLOCKED +# define __SPIN_LOCK_UNLOCKED(name) _SPIN_LOCK_UNLOCKED +#endif + +#define __DEFINE_SPINLOCK(name) \ + spinlock_t name = __SPIN_LOCK_UNLOCKED(name) + +#define DEFINE_SPINLOCK(name) \ + spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name) + +#ifdef CONFIG_PREEMPT_RT + +/* + * RW-semaphores are a spinlock plus a reader-depth count. + * + * Note that the semantics are different from the usual + * Linux rw-sems, in PREEMPT_RT mode we do not allow + * multiple readers to hold the lock at once, we only allow + * a read-lock owner to read-lock recursively. This is + * better for latency, makes the implementation inherently + * fair and makes it simpler as well: + */ +struct rw_semaphore { + struct rt_mutex lock; + int read_depth; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +/* + * rwlocks - an RW semaphore plus lock-break field: + */ +typedef struct { + struct rt_mutex lock; + int read_depth; + unsigned int break_lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; + +# ifdef CONFIG_DEBUG_RT_MUTEXES +# define __RW_LOCK_UNLOCKED(name) (rwlock_t) \ + { .lock = { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name), \ + .save_state = 1, .file = __FILE__, .line = __LINE__ } } +# else +# define __RW_LOCK_UNLOCKED(name) (rwlock_t) \ + { .lock = { .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name) } } +# endif +#else /* !PREEMPT_RT */ + + typedef raw_rwlock_t rwlock_t; +# ifdef CONFIG_DEBUG_SPINLOCK +# define _RW_LOCK_UNLOCKED \ + (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ + .magic = RWLOCK_MAGIC, \ + .owner = SPINLOCK_OWNER_INIT, \ + .owner_cpu = -1 } +# else +# define _RW_LOCK_UNLOCKED \ + (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } +# endif +# define __RW_LOCK_UNLOCKED(name) _RW_LOCK_UNLOCKED +#endif + +#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(rw_old_style) + +#define DEFINE_RWLOCK(name) \ + rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) + +#ifdef CONFIG_PREEMPT_RT + +/* + * Semaphores - a spinlock plus the semaphore count: + */ +struct semaphore { + atomic_t count; + struct rt_mutex lock; +}; + +#define DECLARE_MUTEX(name) \ +struct semaphore name = \ + { .count = { 1 }, .lock = __RT_MUTEX_INITIALIZER(name.lock) } + +extern void +__sema_init(struct semaphore *sem, int val, char *name, char *file, int line); + +#define rt_sema_init(sem, val) \ + __sema_init(sem, val, #sem, __FILE__, __LINE__) + +extern void 
+__init_MUTEX(struct semaphore *sem, char *name, char *file, int line); +#define rt_init_MUTEX(sem) \ + __init_MUTEX(sem, #sem, __FILE__, __LINE__) + +extern void there_is_no_init_MUTEX_LOCKED_for_RT_semaphores(void); + +/* + * No locked initialization for RT semaphores + */ +#define rt_init_MUTEX_LOCKED(sem) \ + there_is_no_init_MUTEX_LOCKED_for_RT_semaphores() +extern void rt_down(struct semaphore *sem); +extern int rt_down_interruptible(struct semaphore *sem); +extern int rt_down_timeout(struct semaphore *sem, long jiffies); +extern int rt_down_trylock(struct semaphore *sem); +extern void rt_up(struct semaphore *sem); + +#define rt_sem_is_locked(s) rt_mutex_is_locked(&(s)->lock) +#define rt_sema_count(s) atomic_read(&(s)->count) + +extern int __bad_func_type(void); + +#include + +/* + * PICK_SEM_OP() is a small redirector to allow less typing of the lock + * types struct compat_semaphore, struct semaphore, at the front of the + * PICK_FUNCTION macro. + */ +#define PICK_SEM_OP(...) PICK_FUNCTION(struct compat_semaphore *, \ + struct semaphore *, ##__VA_ARGS__) +#define PICK_SEM_OP_RET(...) PICK_FUNCTION_RET(struct compat_semaphore *,\ + struct semaphore *, ##__VA_ARGS__) + +#define sema_init(sem, val) \ + PICK_SEM_OP(compat_sema_init, rt_sema_init, sem, val) + +#define init_MUTEX(sem) PICK_SEM_OP(compat_init_MUTEX, rt_init_MUTEX, sem) + +#define init_MUTEX_LOCKED(sem) \ + PICK_SEM_OP(compat_init_MUTEX_LOCKED, rt_init_MUTEX_LOCKED, sem) + +#define down(sem) PICK_SEM_OP(compat_down, rt_down, sem) + +#define down_timeout(sem, jiff) \ + PICK_SEM_OP_RET(compat_down_timeout, rt_down_timeout, sem, jiff) + +#define down_interruptible(sem) \ + PICK_SEM_OP_RET(compat_down_interruptible, rt_down_interruptible, sem) + +#define down_trylock(sem) \ + PICK_SEM_OP_RET(compat_down_trylock, rt_down_trylock, sem) + +#define up(sem) PICK_SEM_OP(compat_up, rt_up, sem) + +/* + * rwsems: + */ + +#define __RWSEM_INITIALIZER(name) \ + { .lock = __RT_MUTEX_INITIALIZER(name.lock) } + +#define DECLARE_RWSEM(lockname) \ + struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) + +extern void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name, + struct lock_class_key *key); + +# define rt_init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __rt_rwsem_init((sem), #sem, &__key); \ +} while (0) + +extern void rt_down_write(struct rw_semaphore *rwsem); +extern void +rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); +extern void +rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); +extern void rt_down_read(struct rw_semaphore *rwsem); +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void rt_down_read_non_owner(struct rw_semaphore *rwsem); +#else +# define rt_down_read_non_owner(rwsem) rt_down_read(rwsem) +#endif +extern int rt_down_write_trylock(struct rw_semaphore *rwsem); +extern int rt_down_read_trylock(struct rw_semaphore *rwsem); +extern void rt_up_read(struct rw_semaphore *rwsem); +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void rt_up_read_non_owner(struct rw_semaphore *rwsem); +#else +# define rt_up_read_non_owner(rwsem) rt_up_read(rwsem) +#endif +extern void rt_up_write(struct rw_semaphore *rwsem); +extern void rt_downgrade_write(struct rw_semaphore *rwsem); + +# define rt_rwsem_is_locked(rws) (rt_mutex_is_locked(&(rws)->lock)) + +#define PICK_RWSEM_OP(...) PICK_FUNCTION(struct compat_rw_semaphore *, \ + struct rw_semaphore *, ##__VA_ARGS__) +#define PICK_RWSEM_OP_RET(...) 
PICK_FUNCTION_RET(struct compat_rw_semaphore *,\ + struct rw_semaphore *, ##__VA_ARGS__) + +#define init_rwsem(rwsem) PICK_RWSEM_OP(compat_init_rwsem, rt_init_rwsem, rwsem) + +#define down_read(rwsem) PICK_RWSEM_OP(compat_down_read, rt_down_read, rwsem) + +#define down_read_non_owner(rwsem) \ + PICK_RWSEM_OP(compat_down_read_non_owner, rt_down_read_non_owner, rwsem) + +#define down_read_trylock(rwsem) \ + PICK_RWSEM_OP_RET(compat_down_read_trylock, rt_down_read_trylock, rwsem) + +#define down_write(rwsem) PICK_RWSEM_OP(compat_down_write, rt_down_write, rwsem) + +#define down_read_nested(rwsem, subclass) \ + PICK_RWSEM_OP(compat_down_read_nested, rt_down_read_nested, \ + rwsem, subclass) + +#define down_write_nested(rwsem, subclass) \ + PICK_RWSEM_OP(compat_down_write_nested, rt_down_write_nested, \ + rwsem, subclass) + +#define down_write_trylock(rwsem) \ + PICK_RWSEM_OP_RET(compat_down_write_trylock, rt_down_write_trylock,\ + rwsem) + +#define up_read(rwsem) PICK_RWSEM_OP(compat_up_read, rt_up_read, rwsem) + +#define up_read_non_owner(rwsem) \ + PICK_RWSEM_OP(compat_up_read_non_owner, rt_up_read_non_owner, rwsem) + +#define up_write(rwsem) PICK_RWSEM_OP(compat_up_write, rt_up_write, rwsem) + +#define downgrade_write(rwsem) \ + PICK_RWSEM_OP(compat_downgrade_write, rt_downgrade_write, rwsem) + +#define rwsem_is_locked(rwsem) \ + PICK_RWSEM_OP_RET(compat_rwsem_is_locked, rt_rwsem_is_locked, rwsem) + +#endif /* CONFIG_PREEMPT_RT */ + +#endif + Index: linux-2.6-tip/include/linux/rtmutex.h =================================================================== --- linux-2.6-tip.orig/include/linux/rtmutex.h +++ linux-2.6-tip/include/linux/rtmutex.h @@ -24,7 +24,7 @@ * @owner: the mutex owner */ struct rt_mutex { - spinlock_t wait_lock; + raw_spinlock_t wait_lock; struct plist_head wait_list; struct task_struct *owner; #ifdef CONFIG_DEBUG_RT_MUTEXES @@ -63,7 +63,7 @@ struct hrtimer_sleeper; #endif #define __RT_MUTEX_INITIALIZER(mutexname) \ - { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + { .wait_lock = RAW_SPIN_LOCK_UNLOCKED(mutexname) \ , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ , .owner = NULL \ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} @@ -88,6 +88,8 @@ extern void rt_mutex_destroy(struct rt_m extern void rt_mutex_lock(struct rt_mutex *lock); extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, int detect_deadlock); +extern int rt_mutex_lock_killable(struct rt_mutex *lock, + int detect_deadlock); extern int rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout, int detect_deadlock); Index: linux-2.6-tip/include/linux/rwsem-spinlock.h =================================================================== --- linux-2.6-tip.orig/include/linux/rwsem-spinlock.h +++ linux-2.6-tip/include/linux/rwsem-spinlock.h @@ -28,7 +28,7 @@ struct rwsem_waiter; * - if activity is -1 then there is one active writer * - if wait_list is not empty, then there are processes waiting for the semaphore */ -struct rw_semaphore { +struct compat_rw_semaphore { __s32 activity; spinlock_t wait_lock; struct list_head wait_list; @@ -43,33 +43,32 @@ struct rw_semaphore { # define __RWSEM_DEP_MAP_INIT(lockname) #endif -#define __RWSEM_INITIALIZER(name) \ -{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEP_MAP_INIT(name) } +#define __COMPAT_RWSEM_INITIALIZER(name) \ +{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = 
__RWSEM_INITIALIZER(name) +#define COMPAT_DECLARE_RWSEM(name) \ + struct compat_rw_semaphore name = __COMPAT_RWSEM_INITIALIZER(name) -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, +extern void __compat_init_rwsem(struct compat_rw_semaphore *sem, const char *name, struct lock_class_key *key); -#define init_rwsem(sem) \ +#define compat_init_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ - __init_rwsem((sem), #sem, &__key); \ + __compat_init_rwsem((sem), #sem, &__key); \ } while (0) -extern void __down_read(struct rw_semaphore *sem); -extern int __down_read_trylock(struct rw_semaphore *sem); -extern void __down_write(struct rw_semaphore *sem); -extern void __down_write_nested(struct rw_semaphore *sem, int subclass); -extern int __down_write_trylock(struct rw_semaphore *sem); -extern void __up_read(struct rw_semaphore *sem); -extern void __up_write(struct rw_semaphore *sem); -extern void __downgrade_write(struct rw_semaphore *sem); +extern void __down_read(struct compat_rw_semaphore *sem); +extern int __down_read_trylock(struct compat_rw_semaphore *sem); +extern void __down_write(struct compat_rw_semaphore *sem); +extern void __down_write_nested(struct compat_rw_semaphore *sem, int subclass); +extern int __down_write_trylock(struct compat_rw_semaphore *sem); +extern void __up_read(struct compat_rw_semaphore *sem); +extern void __up_write(struct compat_rw_semaphore *sem); +extern void __downgrade_write(struct compat_rw_semaphore *sem); -static inline int rwsem_is_locked(struct rw_semaphore *sem) +static inline int compat_rwsem_is_locked(struct compat_rw_semaphore *sem) { return (sem->activity != 0); } Index: linux-2.6-tip/include/linux/rwsem.h =================================================================== --- linux-2.6-tip.orig/include/linux/rwsem.h +++ linux-2.6-tip/include/linux/rwsem.h @@ -9,53 +9,68 @@ #include +#ifdef CONFIG_PREEMPT_RT +# include +#endif + #include #include #include #include -struct rw_semaphore; +#ifndef CONFIG_PREEMPT_RT +/* + * On !PREEMPT_RT all rw-semaphores are compat: + */ +#define compat_rw_semaphore rw_semaphore +#endif + +struct compat_rw_semaphore; #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK -#include /* use a generic implementation */ +# include /* use a generic implementation */ +# ifndef CONFIG_PREEMPT_RT +# define __RWSEM_INITIALIZER __COMPAT_RWSEM_INITIALIZER +# define DECLARE_RWSEM COMPAT_DECLARE_RWSEM +# endif #else -#include /* use an arch-specific implementation */ +# include /* use an arch-specific implementation */ #endif /* * lock for reading */ -extern void down_read(struct rw_semaphore *sem); +extern void compat_down_read(struct compat_rw_semaphore *sem); /* * trylock for reading -- returns 1 if successful, 0 if contention */ -extern int down_read_trylock(struct rw_semaphore *sem); +extern int compat_down_read_trylock(struct compat_rw_semaphore *sem); /* * lock for writing */ -extern void down_write(struct rw_semaphore *sem); +extern void compat_down_write(struct compat_rw_semaphore *sem); /* * trylock for writing -- returns 1 if successful, 0 if contention */ -extern int down_write_trylock(struct rw_semaphore *sem); +extern int compat_down_write_trylock(struct compat_rw_semaphore *sem); /* * release a read lock */ -extern void up_read(struct rw_semaphore *sem); +extern void compat_up_read(struct compat_rw_semaphore *sem); /* * release a write lock */ -extern void up_write(struct rw_semaphore *sem); +extern void compat_up_write(struct compat_rw_semaphore *sem); /* * downgrade write lock to read lock */ -extern void 
downgrade_write(struct rw_semaphore *sem); +extern void compat_downgrade_write(struct compat_rw_semaphore *sem); #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -71,21 +86,78 @@ extern void downgrade_write(struct rw_se * lockdep_set_class() at lock initialization time. * See Documentation/lockdep-design.txt for more details.) */ -extern void down_read_nested(struct rw_semaphore *sem, int subclass); -extern void down_write_nested(struct rw_semaphore *sem, int subclass); +extern void +compat_down_read_nested(struct compat_rw_semaphore *sem, int subclass); +extern void +compat_down_write_nested(struct compat_rw_semaphore *sem, int subclass); /* * Take/release a lock when not the owner will release it. * * [ This API should be avoided as much as possible - the * proper abstraction for this case is completions. ] */ -extern void down_read_non_owner(struct rw_semaphore *sem); -extern void up_read_non_owner(struct rw_semaphore *sem); +extern void +compat_down_read_non_owner(struct compat_rw_semaphore *sem); +extern void +compat_up_read_non_owner(struct compat_rw_semaphore *sem); #else -# define down_read_nested(sem, subclass) down_read(sem) -# define down_write_nested(sem, subclass) down_write(sem) -# define down_read_non_owner(sem) down_read(sem) -# define up_read_non_owner(sem) up_read(sem) +# define compat_down_read_nested(sem, subclass) compat_down_read(sem) +# define compat_down_write_nested(sem, subclass) compat_down_write(sem) +# define compat_down_read_non_owner(sem) compat_down_read(sem) +# define compat_up_read_non_owner(sem) compat_up_read(sem) #endif +#ifndef CONFIG_PREEMPT_RT + +#define DECLARE_RWSEM COMPAT_DECLARE_RWSEM + +/* + * NOTE, lockdep: this has to be a macro, so that separate class-keys + * get generated by the compiler, if the same function does multiple + * init_rwsem() calls to different rwsems. + */ +#define init_rwsem(rwsem) compat_init_rwsem(rwsem) + +static inline void down_read(struct compat_rw_semaphore *rwsem) +{ + compat_down_read(rwsem); +} +static inline int down_read_trylock(struct compat_rw_semaphore *rwsem) +{ + return compat_down_read_trylock(rwsem); +} +static inline void down_write(struct compat_rw_semaphore *rwsem) +{ + compat_down_write(rwsem); +} +static inline int down_write_trylock(struct compat_rw_semaphore *rwsem) +{ + return compat_down_write_trylock(rwsem); +} +static inline void up_read(struct compat_rw_semaphore *rwsem) +{ + compat_up_read(rwsem); +} +static inline void up_write(struct compat_rw_semaphore *rwsem) +{ + compat_up_write(rwsem); +} +static inline void downgrade_write(struct compat_rw_semaphore *rwsem) +{ + compat_downgrade_write(rwsem); +} +static inline int rwsem_is_locked(struct compat_rw_semaphore *sem) +{ + return compat_rwsem_is_locked(sem); +} +# define down_read_nested(sem, subclass) \ + compat_down_read_nested(sem, subclass) +# define down_write_nested(sem, subclass) \ + compat_down_write_nested(sem, subclass) +# define down_read_non_owner(sem) \ + compat_down_read_non_owner(sem) +# define up_read_non_owner(sem) \ + compat_up_read_non_owner(sem) +#endif /* !CONFIG_PREEMPT_RT */ + #endif /* _LINUX_RWSEM_H */ Index: linux-2.6-tip/include/linux/sched.h =================================================================== --- linux-2.6-tip.orig/include/linux/sched.h +++ linux-2.6-tip/include/linux/sched.h @@ -177,6 +177,7 @@ print_cfs_rq(struct seq_file *m, int cpu #endif extern unsigned long long time_sync_thresh; +extern struct semaphore kernel_sem; /* * Task state bitmask. NOTE! 
These bits are also @@ -189,16 +190,17 @@ extern unsigned long long time_sync_thre * mistake. */ #define TASK_RUNNING 0 -#define TASK_INTERRUPTIBLE 1 -#define TASK_UNINTERRUPTIBLE 2 -#define __TASK_STOPPED 4 -#define __TASK_TRACED 8 +#define TASK_RUNNING_MUTEX 1 +#define TASK_INTERRUPTIBLE 2 +#define TASK_UNINTERRUPTIBLE 4 +#define __TASK_STOPPED 8 +#define __TASK_TRACED 16 /* in tsk->exit_state */ -#define EXIT_ZOMBIE 16 -#define EXIT_DEAD 32 +#define EXIT_ZOMBIE 32 +#define EXIT_DEAD 64 /* in tsk->state again */ -#define TASK_DEAD 64 -#define TASK_WAKEKILL 128 +#define TASK_DEAD 128 +#define TASK_WAKEKILL 256 /* Convenience macros for the sake of set_task_state */ #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) @@ -207,10 +209,12 @@ extern unsigned long long time_sync_thre /* Convenience macros for the sake of wake_up */ #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) -#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) +#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED | \ + TASK_RUNNING_MUTEX) /* get_task_state() */ -#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ +#define TASK_REPORT (TASK_RUNNING | TASK_RUNNING_MUTEX | \ + TASK_INTERRUPTIBLE | \ TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ __TASK_TRACED) @@ -507,7 +511,7 @@ struct task_cputime { struct thread_group_cputimer { struct task_cputime cputime; int running; - spinlock_t lock; + raw_spinlock_t lock; }; /* @@ -1326,7 +1330,7 @@ struct task_struct { spinlock_t alloc_lock; /* Protection of the PI data structures: */ - spinlock_t pi_lock; + raw_spinlock_t pi_lock; #ifdef CONFIG_RT_MUTEXES /* PI waiters blocked on a rt_mutex held by this task */ @@ -1363,6 +1367,26 @@ struct task_struct { gfp_t lockdep_reclaim_gfp; #endif +/* realtime bits */ + +#define MAX_PREEMPT_TRACE 25 +#define MAX_LOCK_STACK MAX_PREEMPT_TRACE +#ifdef CONFIG_DEBUG_PREEMPT + int lock_count; +# ifdef CONFIG_PREEMPT_RT + struct rt_mutex *owned_lock[MAX_LOCK_STACK]; +# endif +#endif +#ifdef CONFIG_DETECT_SOFTLOCKUP + unsigned long softlockup_count; /* Count to keep track how long the + * thread is in the kernel without + * sleeping. + */ +#endif +#ifdef CONFIG_DEBUG_RT_MUTEXES + void *last_kernel_lock; +#endif + /* journalling filesystem info */ void *journal_info; @@ -1642,6 +1666,7 @@ extern cputime_t task_gtime(struct task_ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ #define PF_HARDIRQ 0x08000020 /* hardirq context */ +#define PF_NOSCHED 0x00000020 /* Userspace does not expect scheduling */ #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ #define PF_DUMPCORE 0x00000200 /* dumped core */ @@ -1831,6 +1856,7 @@ extern struct task_struct *curr_task(int extern void set_curr_task(int cpu, struct task_struct *p); void yield(void); +void __yield(void); /* * The default (Linux) execution domain. 
@@ -1898,6 +1924,9 @@ extern void do_timer(unsigned long ticks extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); +extern int wake_up_process_mutex(struct task_struct * tsk); +extern int wake_up_process_sync(struct task_struct * tsk); +extern int wake_up_process_mutex_sync(struct task_struct * tsk); extern void wake_up_new_task(struct task_struct *tsk, unsigned long clone_flags); #ifdef CONFIG_SMP @@ -2243,7 +2272,13 @@ static inline int cond_resched(void) return _cond_resched(); } #endif -extern int cond_resched_lock(spinlock_t * lock); +extern int __cond_resched_raw_spinlock(raw_spinlock_t *lock); +extern int __cond_resched_spinlock(spinlock_t *spinlock); + +#define cond_resched_lock(lock) \ + PICK_SPIN_OP_RET(__cond_resched_raw_spinlock, __cond_resched_spinlock,\ + lock) + extern int cond_resched_softirq(void); static inline int cond_resched_bkl(void) { @@ -2257,7 +2292,7 @@ extern int cond_resched_hardirq_context( * task waiting?: (technically does not depend on CONFIG_PREEMPT, * but a general need for low latency) */ -static inline int spin_needbreak(spinlock_t *lock) +static inline int __raw_spin_needbreak(raw_spinlock_t *lock) { #ifdef CONFIG_PREEMPT return spin_is_contended(lock); @@ -2283,6 +2318,23 @@ static inline void thread_group_cputime_ { } +#ifdef CONFIG_PREEMPT_RT +static inline int __spin_needbreak(spinlock_t *lock) +{ + return lock->break_lock; +} +#else +static inline int __spin_needbreak(spinlock_t *lock) +{ + /* should never be call outside of RT */ + BUG(); + return 0; +} +#endif + +#define spin_needbreak(lock) \ + PICK_SPIN_OP_RET(__raw_spin_needbreak, __spin_needbreak, lock) + static inline int softirq_need_resched(void) { if (softirq_preemption && (current->flags & PF_SOFTIRQ)) Index: linux-2.6-tip/include/linux/semaphore.h =================================================================== --- linux-2.6-tip.orig/include/linux/semaphore.h +++ linux-2.6-tip/include/linux/semaphore.h @@ -9,41 +9,86 @@ #ifndef __LINUX_SEMAPHORE_H #define __LINUX_SEMAPHORE_H -#include -#include +#ifndef CONFIG_PREEMPT_RT +# define compat_semaphore semaphore +#endif + +# include +# include /* Please don't access any members of this structure directly */ -struct semaphore { +struct compat_semaphore { spinlock_t lock; unsigned int count; struct list_head wait_list; }; -#define __SEMAPHORE_INITIALIZER(name, n) \ +#define __COMPAT_SEMAPHORE_INITIALIZER(name, n) \ { \ .lock = __SPIN_LOCK_UNLOCKED((name).lock), \ .count = n, \ .wait_list = LIST_HEAD_INIT((name).wait_list), \ } -#define DECLARE_MUTEX(name) \ - struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) +#define __COMPAT_DECLARE_SEMAPHORE_GENERIC(name, count) \ + struct compat_semaphore name = __COMPAT_SEMAPHORE_INITIALIZER(name, count) -static inline void sema_init(struct semaphore *sem, int val) +#define COMPAT_DECLARE_MUTEX(name) __COMPAT_DECLARE_SEMAPHORE_GENERIC(name, 1) +static inline void compat_sema_init(struct compat_semaphore *sem, int val) { static struct lock_class_key __key; - *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); + *sem = (struct compat_semaphore) __COMPAT_SEMAPHORE_INITIALIZER(*sem, val); lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0); } -#define init_MUTEX(sem) sema_init(sem, 1) -#define init_MUTEX_LOCKED(sem) sema_init(sem, 0) +#define compat_init_MUTEX(sem) compat_sema_init(sem, 1) +#define compat_init_MUTEX_LOCKED(sem) compat_sema_init(sem, 0) + +extern void compat_down(struct compat_semaphore 
*sem); +extern int __must_check compat_down_interruptible(struct compat_semaphore *sem); +extern int __must_check compat_down_killable(struct compat_semaphore *sem); +extern int __must_check compat_down_trylock(struct compat_semaphore *sem); +extern int __must_check compat_down_timeout(struct compat_semaphore *sem, long jiffies); +extern void compat_up(struct compat_semaphore *sem); + +#ifdef CONFIG_PREEMPT_RT +# include +#else +#define DECLARE_MUTEX COMPAT_DECLARE_MUTEX -extern void down(struct semaphore *sem); -extern int __must_check down_interruptible(struct semaphore *sem); -extern int __must_check down_killable(struct semaphore *sem); -extern int __must_check down_trylock(struct semaphore *sem); -extern int __must_check down_timeout(struct semaphore *sem, long jiffies); -extern void up(struct semaphore *sem); +static inline void sema_init(struct compat_semaphore *sem, int val) +{ + compat_sema_init(sem, val); +} +static inline void init_MUTEX(struct compat_semaphore *sem) +{ + compat_init_MUTEX(sem); +} +static inline void init_MUTEX_LOCKED(struct compat_semaphore *sem) +{ + compat_init_MUTEX_LOCKED(sem); +} +static inline void down(struct compat_semaphore *sem) +{ + compat_down(sem); +} +static inline int down_interruptible(struct compat_semaphore *sem) +{ + return compat_down_interruptible(sem); +} +static inline int down_trylock(struct compat_semaphore *sem) +{ + return compat_down_trylock(sem); +} +static inline int down_timeout(struct compat_semaphore *sem, long jiffies) +{ + return compat_down_timeout(sem, jiffies); +} + +static inline void up(struct compat_semaphore *sem) +{ + compat_up(sem); +} +#endif /* CONFIG_PREEMPT_RT */ #endif /* __LINUX_SEMAPHORE_H */ Index: linux-2.6-tip/include/linux/seqlock.h =================================================================== --- linux-2.6-tip.orig/include/linux/seqlock.h +++ linux-2.6-tip/include/linux/seqlock.h @@ -32,46 +32,80 @@ typedef struct { unsigned sequence; spinlock_t lock; -} seqlock_t; +} __seqlock_t; + +typedef struct { + unsigned sequence; + raw_spinlock_t lock; +} __raw_seqlock_t; + +#define seqlock_need_resched(seq) lock_need_resched(&(seq)->lock) + +#ifdef CONFIG_PREEMPT_RT +typedef __seqlock_t seqlock_t; +#else +typedef __raw_seqlock_t seqlock_t; +#endif + +typedef __raw_seqlock_t raw_seqlock_t; /* * These macros triggered gcc-3.x compile-time problems. We think these are * OK now. Be cautious. */ -#define __SEQLOCK_UNLOCKED(lockname) \ - { 0, __SPIN_LOCK_UNLOCKED(lockname) } +#define __RAW_SEQLOCK_UNLOCKED(lockname) \ + { 0, RAW_SPIN_LOCK_UNLOCKED(lockname) } + +#ifdef CONFIG_PREEMPT_RT +# define __SEQLOCK_UNLOCKED(lockname) { 0, __SPIN_LOCK_UNLOCKED(lockname) } +#else +# define __SEQLOCK_UNLOCKED(lockname) __RAW_SEQLOCK_UNLOCKED(lockname) +#endif #define SEQLOCK_UNLOCKED \ __SEQLOCK_UNLOCKED(old_style_seqlock_init) -#define seqlock_init(x) \ - do { \ - (x)->sequence = 0; \ - spin_lock_init(&(x)->lock); \ - } while (0) +#define raw_seqlock_init(x) \ + do { *(x) = (raw_seqlock_t) __RAW_SEQLOCK_UNLOCKED(x); spin_lock_init(&(x)->lock); } while (0) + +#define seqlock_init(x) \ + do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); spin_lock_init(&(x)->lock); } while (0) #define DEFINE_SEQLOCK(x) \ seqlock_t x = __SEQLOCK_UNLOCKED(x) +#define DEFINE_RAW_SEQLOCK(name) \ + raw_seqlock_t name __cacheline_aligned_in_smp = \ + __RAW_SEQLOCK_UNLOCKED(name) + + /* Lock out other writers and update the count. * Acts like a normal spin_lock/unlock. * Don't need preempt_disable() because that is in the spin_lock already. 
*/ -static inline void write_seqlock(seqlock_t *sl) +static inline void __write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); ++sl->sequence; smp_wmb(); } -static inline void write_sequnlock(seqlock_t *sl) +static __always_inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) +{ + __write_seqlock(sl); + return 0; +} + +static inline void __write_sequnlock(seqlock_t *sl) { smp_wmb(); sl->sequence++; spin_unlock(&sl->lock); } -static inline int write_tryseqlock(seqlock_t *sl) +#define __write_sequnlock_irqrestore(sl, flags) __write_sequnlock(sl) + +static inline int __write_tryseqlock(seqlock_t *sl) { int ret = spin_trylock(&sl->lock); @@ -83,7 +117,7 @@ static inline int write_tryseqlock(seqlo } /* Start of read calculation -- fetch last complete writer token */ -static __always_inline unsigned read_seqbegin(const seqlock_t *sl) +static __always_inline unsigned __read_seqbegin(const seqlock_t *sl) { unsigned ret; @@ -103,13 +137,195 @@ repeat: * * If sequence value changed then writer changed data while in section. */ -static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start) +static inline int __read_seqretry(seqlock_t *sl, unsigned iv) +{ + int ret; + + smp_rmb(); + ret = (iv & 1) | (sl->sequence ^ iv); + /* + * If invalid then serialize with the writer, to make sure we + * are not livelocking it: + */ + if (unlikely(ret)) { + unsigned long flags; + spin_lock_irqsave(&sl->lock, flags); + spin_unlock_irqrestore(&sl->lock, flags); + } + return ret; +} + +static __always_inline void __write_seqlock_raw(raw_seqlock_t *sl) +{ + spin_lock(&sl->lock); + ++sl->sequence; + smp_wmb(); +} + +static __always_inline unsigned long +__write_seqlock_irqsave_raw(raw_seqlock_t *sl) +{ + unsigned long flags; + + local_irq_save(flags); + __write_seqlock_raw(sl); + return flags; +} + +static __always_inline void __write_seqlock_irq_raw(raw_seqlock_t *sl) +{ + local_irq_disable(); + __write_seqlock_raw(sl); +} + +static __always_inline void __write_seqlock_bh_raw(raw_seqlock_t *sl) +{ + local_bh_disable(); + __write_seqlock_raw(sl); +} + +static __always_inline void __write_sequnlock_raw(raw_seqlock_t *sl) +{ + smp_wmb(); + sl->sequence++; + spin_unlock(&sl->lock); +} + +static __always_inline void +__write_sequnlock_irqrestore_raw(raw_seqlock_t *sl, unsigned long flags) +{ + __write_sequnlock_raw(sl); + local_irq_restore(flags); + preempt_check_resched(); +} + +static __always_inline void __write_sequnlock_irq_raw(raw_seqlock_t *sl) +{ + __write_sequnlock_raw(sl); + local_irq_enable(); + preempt_check_resched(); +} + +static __always_inline void __write_sequnlock_bh_raw(raw_seqlock_t *sl) +{ + __write_sequnlock_raw(sl); + local_bh_enable(); +} + +static __always_inline int __write_tryseqlock_raw(raw_seqlock_t *sl) +{ + int ret = spin_trylock(&sl->lock); + + if (ret) { + ++sl->sequence; + smp_wmb(); + } + return ret; +} + +static __always_inline unsigned __read_seqbegin_raw(const raw_seqlock_t *sl) +{ + unsigned ret = sl->sequence; + smp_rmb(); + return ret; +} + +static __always_inline int __read_seqretry_raw(const raw_seqlock_t *sl, unsigned start) { smp_rmb(); return (sl->sequence != start); } +extern int __bad_seqlock_type(void); + +/* + * PICK_SEQ_OP() is a small redirector to allow less typing of the lock + * types raw_seqlock_t, seqlock_t, at the front of the PICK_FUNCTION + * macro. + */ +#define PICK_SEQ_OP(...) \ + PICK_FUNCTION(raw_seqlock_t *, seqlock_t *, ##__VA_ARGS__) +#define PICK_SEQ_OP_RET(...) 
\ + PICK_FUNCTION_RET(raw_seqlock_t *, seqlock_t *, ##__VA_ARGS__) + +#define write_seqlock(sl) PICK_SEQ_OP(__write_seqlock_raw, __write_seqlock, sl) + +#define write_sequnlock(sl) \ + PICK_SEQ_OP(__write_sequnlock_raw, __write_sequnlock, sl) + +#define write_tryseqlock(sl) \ + PICK_SEQ_OP_RET(__write_tryseqlock_raw, __write_tryseqlock, sl) + +#define read_seqbegin(sl) \ + PICK_SEQ_OP_RET(__read_seqbegin_raw, __read_seqbegin, sl) + +#define read_seqretry(sl, iv) \ + PICK_SEQ_OP_RET(__read_seqretry_raw, __read_seqretry, sl, iv) + +#define write_seqlock_irqsave(lock, flags) \ +do { \ + flags = PICK_SEQ_OP_RET(__write_seqlock_irqsave_raw, \ + __write_seqlock_irqsave, lock); \ +} while (0) + +#define write_seqlock_irq(lock) \ + PICK_SEQ_OP(__write_seqlock_irq_raw, __write_seqlock, lock) + +#define write_seqlock_bh(lock) \ + PICK_SEQ_OP(__write_seqlock_bh_raw, __write_seqlock, lock) + +#define write_sequnlock_irqrestore(lock, flags) \ + PICK_SEQ_OP(__write_sequnlock_irqrestore_raw, \ + __write_sequnlock_irqrestore, lock, flags) + +#define write_sequnlock_bh(lock) \ + PICK_SEQ_OP(__write_sequnlock_bh_raw, __write_sequnlock, lock) + +#define write_sequnlock_irq(lock) \ + PICK_SEQ_OP(__write_sequnlock_irq_raw, __write_sequnlock, lock) + +static __always_inline +unsigned long __read_seqbegin_irqsave_raw(raw_seqlock_t *sl) +{ + unsigned long flags; + + local_irq_save(flags); + __read_seqbegin_raw(sl); + return flags; +} + +static __always_inline unsigned long __read_seqbegin_irqsave(seqlock_t *sl) +{ + __read_seqbegin(sl); + return 0; +} + +#define read_seqbegin_irqsave(lock, flags) \ +do { \ + flags = PICK_SEQ_OP_RET(__read_seqbegin_irqsave_raw, \ + __read_seqbegin_irqsave, lock); \ +} while (0) + +static __always_inline int +__read_seqretry_irqrestore(seqlock_t *sl, unsigned iv, unsigned long flags) +{ + return __read_seqretry(sl, iv); +} + +static __always_inline int +__read_seqretry_irqrestore_raw(raw_seqlock_t *sl, unsigned iv, + unsigned long flags) +{ + int ret = read_seqretry(sl, iv); + local_irq_restore(flags); + preempt_check_resched(); + return ret; +} + +#define read_seqretry_irqrestore(lock, iv, flags) \ + PICK_SEQ_OP_RET(__read_seqretry_irqrestore_raw, \ + __read_seqretry_irqrestore, lock, iv, flags) /* * Version using sequence counter only. @@ -166,32 +382,4 @@ static inline void write_seqcount_end(se smp_wmb(); s->sequence++; } - -/* - * Possible sw/hw IRQ protected versions of the interfaces. 
- */ -#define write_seqlock_irqsave(lock, flags) \ - do { local_irq_save(flags); write_seqlock(lock); } while (0) -#define write_seqlock_irq(lock) \ - do { local_irq_disable(); write_seqlock(lock); } while (0) -#define write_seqlock_bh(lock) \ - do { local_bh_disable(); write_seqlock(lock); } while (0) - -#define write_sequnlock_irqrestore(lock, flags) \ - do { write_sequnlock(lock); local_irq_restore(flags); } while(0) -#define write_sequnlock_irq(lock) \ - do { write_sequnlock(lock); local_irq_enable(); } while(0) -#define write_sequnlock_bh(lock) \ - do { write_sequnlock(lock); local_bh_enable(); } while(0) - -#define read_seqbegin_irqsave(lock, flags) \ - ({ local_irq_save(flags); read_seqbegin(lock); }) - -#define read_seqretry_irqrestore(lock, iv, flags) \ - ({ \ - int ret = read_seqretry(lock, iv); \ - local_irq_restore(flags); \ - ret; \ - }) - #endif /* __LINUX_SEQLOCK_H */ Index: linux-2.6-tip/include/linux/spinlock.h =================================================================== --- linux-2.6-tip.orig/include/linux/spinlock.h +++ linux-2.6-tip/include/linux/spinlock.h @@ -44,6 +44,42 @@ * builds the _spin_*() APIs. * * linux/spinlock.h: builds the final spin_*() APIs. + * + * + * Public types and naming conventions: + * ------------------------------------ + * spinlock_t: type: sleep-lock + * raw_spinlock_t: type: spin-lock (debug) + * + * spin_lock([raw_]spinlock_t): API: acquire lock, both types + * + * + * Internal types and naming conventions: + * ------------------------------------- + * __raw_spinlock_t: type: lowlevel spin-lock + * + * _spin_lock(struct rt_mutex): API: acquire sleep-lock + * __spin_lock(raw_spinlock_t): API: acquire spin-lock (highlevel) + * _raw_spin_lock(raw_spinlock_t): API: acquire spin-lock (debug) + * __raw_spin_lock(__raw_spinlock_t): API: acquire spin-lock (lowlevel) + * + * + * spin_lock(raw_spinlock_t) translates into the following chain of + * calls/inlines/macros, if spin-lock debugging is enabled: + * + * spin_lock() [include/linux/spinlock.h] + * -> __spin_lock() [kernel/spinlock.c] + * -> _raw_spin_lock() [lib/spinlock_debug.c] + * -> __raw_spin_lock() [include/asm/spinlock.h] + * + * spin_lock(spinlock_t) translates into the following chain of + * calls/inlines/macros: + * + * spin_lock() [include/linux/spinlock.h] + * -> _spin_lock() [include/linux/spinlock.h] + * -> rt_spin_lock() [kernel/rtmutex.c] + * -> rt_spin_lock_fastlock() [kernel/rtmutex.c] + * -> rt_spin_lock_slowlock() [kernel/rtmutex.c] */ #include @@ -52,29 +88,15 @@ #include #include #include +#include #include #include +#include +#include #include /* - * Must define these before including other files, inline functions need them - */ -#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME - -#define LOCK_SECTION_START(extra) \ - ".subsection 1\n\t" \ - extra \ - ".ifndef " LOCK_SECTION_NAME "\n\t" \ - LOCK_SECTION_NAME ":\n\t" \ - ".endif\n" - -#define LOCK_SECTION_END \ - ".previous\n\t" - -#define __lockfunc __attribute__((section(".spinlock.text"))) - -/* * Pull the raw_spinlock_t and raw_rwlock_t definitions: */ #include @@ -90,36 +112,10 @@ extern int __lockfunc generic__raw_read_ # include #endif -#ifdef CONFIG_DEBUG_SPINLOCK - extern void __spin_lock_init(spinlock_t *lock, const char *name, - struct lock_class_key *key); -# define spin_lock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - __spin_lock_init((lock), #lock, &__key); \ -} while (0) - -#else -# define spin_lock_init(lock) \ - do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) -#endif - 
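A note on the mechanism, since PICK_SPIN_OP()/PICK_RW_OP() (and PICK_SEQ_OP() above) are only redirectors here: the underlying PICK_FUNCTION()/PICK_FUNCTION_RET() macros live in a separate header that is not part of this hunk. As a rough, simplified sketch of the idea (not the actual -rt implementation), the dispatch can be done entirely at build time on the static type of the lock pointer:

/*
 * Sketch: statically dispatch on whether 'lock' is a raw (spinning) or an
 * rt-mutex based (sleeping) lock.  Dead branches are discarded by the
 * compiler; an unknown lock type survives to link time and trips the
 * deliberately never-defined __bad_spinlock_type() declared just below.
 */
#define EXAMPLE_PICK_SPIN_OP(raw_op, rt_op, lock)			\
do {									\
	if (__builtin_types_compatible_p(typeof(lock), raw_spinlock_t *)) \
		raw_op((raw_spinlock_t *)(void *)(lock));		\
	else if (__builtin_types_compatible_p(typeof(lock), spinlock_t *)) \
		rt_op((spinlock_t *)(void *)(lock));			\
	else								\
		__bad_spinlock_type();					\
} while (0)

With that in place, spin_lock(&some_raw_lock) resolves to the spinning __spin_lock() path and spin_lock(&some_sleeping_lock) to rt_spin_lock(), while every call site keeps using a single API name.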
-#ifdef CONFIG_DEBUG_SPINLOCK - extern void __rwlock_init(rwlock_t *lock, const char *name, - struct lock_class_key *key); -# define rwlock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - __rwlock_init((lock), #lock, &__key); \ -} while (0) -#else -# define rwlock_init(lock) \ - do { *(lock) = RW_LOCK_UNLOCKED; } while (0) -#endif - -#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) +/* + * Pull the RT types: + */ +#include #ifdef CONFIG_GENERIC_LOCKBREAK #define spin_is_contended(lock) ((lock)->break_lock) @@ -132,12 +128,6 @@ do { \ #endif /*__raw_spin_is_contended*/ #endif -/** - * spin_unlock_wait - wait until the spinlock gets unlocked - * @lock: the spinlock in question. - */ -#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) - /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: */ @@ -148,16 +138,16 @@ do { \ #endif #ifdef CONFIG_DEBUG_SPINLOCK - extern void _raw_spin_lock(spinlock_t *lock); -#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) - extern int _raw_spin_trylock(spinlock_t *lock); - extern void _raw_spin_unlock(spinlock_t *lock); - extern void _raw_read_lock(rwlock_t *lock); - extern int _raw_read_trylock(rwlock_t *lock); - extern void _raw_read_unlock(rwlock_t *lock); - extern void _raw_write_lock(rwlock_t *lock); - extern int _raw_write_trylock(rwlock_t *lock); - extern void _raw_write_unlock(rwlock_t *lock); + extern __lockfunc void _raw_spin_lock(raw_spinlock_t *lock); +# define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) + extern __lockfunc int _raw_spin_trylock(raw_spinlock_t *lock); + extern __lockfunc void _raw_spin_unlock(raw_spinlock_t *lock); + extern __lockfunc void _raw_read_lock(raw_rwlock_t *lock); + extern __lockfunc int _raw_read_trylock(raw_rwlock_t *lock); + extern __lockfunc void _raw_read_unlock(raw_rwlock_t *lock); + extern __lockfunc void _raw_write_lock(raw_rwlock_t *lock); + extern __lockfunc int _raw_write_trylock(raw_rwlock_t *lock); + extern __lockfunc void _raw_write_unlock(raw_rwlock_t *lock); #else # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) # define _raw_spin_lock_flags(lock, flags) \ @@ -172,179 +162,425 @@ do { \ # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) #endif -#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) -#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) +extern int __bad_spinlock_type(void); +extern int __bad_rwlock_type(void); + +extern void +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); + +extern void __lockfunc rt_spin_lock(spinlock_t *lock); +extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); +extern void __lockfunc rt_spin_unlock(spinlock_t *lock); +extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); +extern int __lockfunc +rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); +extern int __lockfunc rt_spin_trylock(spinlock_t *lock); +extern int _atomic_dec_and_spin_lock(spinlock_t *lock, atomic_t *atomic); + +/* + * lockdep-less calls, for derived types like rwlock: + * (for trylock they can use rt_mutex_trylock() directly. 
+ */ +extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); +extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); + +#ifdef CONFIG_PREEMPT_RT +# define _spin_lock(l) rt_spin_lock(l) +# define _spin_lock_nested(l, s) rt_spin_lock_nested(l, s) +# define _spin_lock_bh(l) rt_spin_lock(l) +# define _spin_lock_irq(l) rt_spin_lock(l) +# define _spin_unlock(l) rt_spin_unlock(l) +# define _spin_unlock_no_resched(l) rt_spin_unlock(l) +# define _spin_unlock_bh(l) rt_spin_unlock(l) +# define _spin_unlock_irq(l) rt_spin_unlock(l) +# define _spin_unlock_irqrestore(l, f) rt_spin_unlock(l) +static inline unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) +{ + rt_spin_lock(lock); + return 0; +} +static inline unsigned long __lockfunc +_spin_lock_irqsave_nested(spinlock_t *lock, int subclass) +{ + rt_spin_lock_nested(lock, subclass); + return 0; +} +#else +static inline unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) +{ + return 0; +} +static inline unsigned long __lockfunc +_spin_lock_irqsave_nested(spinlock_t *lock, int subclass) +{ + return 0; +} +# define _spin_lock(l) do { } while (0) +# define _spin_lock_nested(l, s) do { } while (0) +# define _spin_lock_bh(l) do { } while (0) +# define _spin_lock_irq(l) do { } while (0) +# define _spin_unlock(l) do { } while (0) +# define _spin_unlock_no_resched(l) do { } while (0) +# define _spin_unlock_bh(l) do { } while (0) +# define _spin_unlock_irq(l) do { } while (0) +# define _spin_unlock_irqrestore(l, f) do { } while (0) +#endif + +#define _spin_lock_init(sl, n, f, l) \ +do { \ + static struct lock_class_key __key; \ + \ + __rt_spin_lock_init(sl, n, &__key); \ +} while (0) + +# ifdef CONFIG_PREEMPT_RT +# define _spin_can_lock(l) (!rt_mutex_is_locked(&(l)->lock)) +# define _spin_is_locked(l) rt_mutex_is_locked(&(l)->lock) +# define _spin_unlock_wait(l) rt_spin_unlock_wait(l) + +# define _spin_trylock(l) rt_spin_trylock(l) +# define _spin_trylock_bh(l) rt_spin_trylock(l) +# define _spin_trylock_irq(l) rt_spin_trylock(l) +# define _spin_trylock_irqsave(l,f) rt_spin_trylock_irqsave(l, f) +# else + + extern int this_should_never_be_called_on_non_rt(spinlock_t *lock); +# define TSNBCONRT(l) this_should_never_be_called_on_non_rt(l) +# define _spin_can_lock(l) TSNBCONRT(l) +# define _spin_is_locked(l) TSNBCONRT(l) +# define _spin_unlock_wait(l) TSNBCONRT(l) + +# define _spin_trylock(l) TSNBCONRT(l) +# define _spin_trylock_bh(l) TSNBCONRT(l) +# define _spin_trylock_irq(l) TSNBCONRT(l) +# define _spin_trylock_irqsave(l,f) TSNBCONRT(l) +#endif + +extern void __lockfunc rt_write_lock(rwlock_t *rwlock); +extern void __lockfunc rt_read_lock(rwlock_t *rwlock); +extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); +extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, + unsigned long *flags); +extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); +extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); +extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); +extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); +extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); +extern void +__rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); + +#define _rwlock_init(rwl, n, f, l) \ +do { \ + static struct lock_class_key __key; \ + \ + __rt_rwlock_init(rwl, n, &__key); \ +} while (0) + +#ifdef CONFIG_PREEMPT_RT +# define rt_read_can_lock(rwl) (!rt_mutex_is_locked(&(rwl)->lock)) +# define rt_write_can_lock(rwl) (!rt_mutex_is_locked(&(rwl)->lock)) +#else + extern 
int rt_rwlock_can_lock_never_call_on_non_rt(rwlock_t *rwlock); +# define rt_read_can_lock(rwl) rt_rwlock_can_lock_never_call_on_non_rt(rwl) +# define rt_write_can_lock(rwl) rt_rwlock_can_lock_never_call_on_non_rt(rwl) +#endif + +# define _read_can_lock(rwl) rt_read_can_lock(rwl) +# define _write_can_lock(rwl) rt_write_can_lock(rwl) + +# define _read_trylock(rwl) rt_read_trylock(rwl) +# define _write_trylock(rwl) rt_write_trylock(rwl) +# define _write_trylock_irqsave(rwl, flags) \ + rt_write_trylock_irqsave(rwl, flags) + +# define _read_lock(rwl) rt_read_lock(rwl) +# define _write_lock(rwl) rt_write_lock(rwl) +# define _read_unlock(rwl) rt_read_unlock(rwl) +# define _write_unlock(rwl) rt_write_unlock(rwl) + +# define _read_lock_bh(rwl) rt_read_lock(rwl) +# define _write_lock_bh(rwl) rt_write_lock(rwl) +# define _read_unlock_bh(rwl) rt_read_unlock(rwl) +# define _write_unlock_bh(rwl) rt_write_unlock(rwl) + +# define _read_lock_irq(rwl) rt_read_lock(rwl) +# define _write_lock_irq(rwl) rt_write_lock(rwl) +# define _read_unlock_irq(rwl) rt_read_unlock(rwl) +# define _write_unlock_irq(rwl) rt_write_unlock(rwl) + +# define _read_lock_irqsave(rwl) rt_read_lock_irqsave(rwl) +# define _write_lock_irqsave(rwl) rt_write_lock_irqsave(rwl) + +# define _read_unlock_irqrestore(rwl, f) rt_read_unlock(rwl) +# define _write_unlock_irqrestore(rwl, f) rt_write_unlock(rwl) + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, + struct lock_class_key *key); +# define _raw_spin_lock_init(lock, name, file, line) \ +do { \ + static struct lock_class_key __key; \ + \ + __raw_spin_lock_init((lock), #lock, &__key); \ +} while (0) + +#else +#define __raw_spin_lock_init(lock) \ + do { *(lock) = RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) +# define _raw_spin_lock_init(lock, name, file, line) __raw_spin_lock_init(lock) +#endif + +/* + * PICK_SPIN_OP()/PICK_RW_OP() are simple redirectors for PICK_FUNCTION + */ +#define PICK_SPIN_OP(...) \ + PICK_FUNCTION(raw_spinlock_t *, spinlock_t *, ##__VA_ARGS__) +#define PICK_SPIN_OP_RET(...) \ + PICK_FUNCTION_RET(raw_spinlock_t *, spinlock_t *, ##__VA_ARGS__) +#define PICK_RW_OP(...) PICK_FUNCTION(raw_rwlock_t *, rwlock_t *, ##__VA_ARGS__) +#define PICK_RW_OP_RET(...) \ + PICK_FUNCTION_RET(raw_rwlock_t *, rwlock_t *, ##__VA_ARGS__) + +#define spin_lock_init(lock) \ + PICK_SPIN_OP(_raw_spin_lock_init, _spin_lock_init, lock, #lock, \ + __FILE__, __LINE__) + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __raw_rwlock_init(raw_rwlock_t *lock, const char *name, + struct lock_class_key *key); +# define _raw_rwlock_init(lock, name, file, line) \ +do { \ + static struct lock_class_key __key; \ + \ + __raw_rwlock_init((lock), #lock, &__key); \ +} while (0) +#else +#define __raw_rwlock_init(lock) \ + do { *(lock) = RAW_RW_LOCK_UNLOCKED(lock); } while (0) +# define _raw_rwlock_init(lock, name, file, line) __raw_rwlock_init(lock) +#endif + +#define rwlock_init(lock) \ + PICK_RW_OP(_raw_rwlock_init, _rwlock_init, lock, #lock, \ + __FILE__, __LINE__) + +#define __spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) + +#define spin_is_locked(lock) \ + PICK_SPIN_OP_RET(__spin_is_locked, _spin_is_locked, lock) + +#define __spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) + +#define spin_unlock_wait(lock) \ + PICK_SPIN_OP(__spin_unlock_wait, _spin_unlock_wait, lock) /* * Define the various spin_lock and rw_lock methods. Note we define these * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. 
The various * methods are defined as nops in the case they are not required. */ -#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) -#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock)) -#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock)) +#define spin_trylock(lock) \ + __cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock, _spin_trylock, lock)) + +#define read_trylock(lock) \ + __cond_lock(lock, PICK_RW_OP_RET(__read_trylock, _read_trylock, lock)) + +#define write_trylock(lock) \ + __cond_lock(lock, PICK_RW_OP_RET(__write_trylock, _write_trylock, lock)) + +#define write_trylock_irqsave(lock, flags) \ + __cond_lock(lock, PICK_RW_OP_RET(__write_trylock_irqsave, \ + _write_trylock_irqsave, lock, &flags)) + +#define __spin_can_lock(lock) __raw_spin_can_lock(&(lock)->raw_lock) +#define __read_can_lock(lock) __raw_read_can_lock(&(lock)->raw_lock) +#define __write_can_lock(lock) __raw_write_can_lock(&(lock)->raw_lock) + +#define read_can_lock(lock) \ + __cond_lock(lock, PICK_RW_OP_RET(__read_can_lock, _read_can_lock, lock)) + +#define write_can_lock(lock) \ + __cond_lock(lock, PICK_RW_OP_RET(__write_can_lock, _write_can_lock,\ + lock)) -#define spin_lock(lock) _spin_lock(lock) +#define spin_lock(lock) PICK_SPIN_OP(__spin_lock, _spin_lock, lock) #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) -# define spin_lock_nest_lock(lock, nest_lock) \ - do { \ - typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ - _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ - } while (0) +# define spin_lock_nested(lock, subclass) \ + PICK_SPIN_OP(__spin_lock_nested, _spin_lock_nested, lock, subclass) #else -# define spin_lock_nested(lock, subclass) _spin_lock(lock) -# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) +# define spin_lock_nested(lock, subclass) spin_lock(lock) #endif -#define write_lock(lock) _write_lock(lock) -#define read_lock(lock) _read_lock(lock) +#define write_lock(lock) PICK_RW_OP(__write_lock, _write_lock, lock) -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) +#define read_lock(lock) PICK_RW_OP(__read_lock, _read_lock, lock) -#define spin_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - flags = _spin_lock_irqsave(lock); \ - } while (0) -#define read_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - flags = _read_lock_irqsave(lock); \ - } while (0) -#define write_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - flags = _write_lock_irqsave(lock); \ - } while (0) +# define spin_lock_irqsave(lock, flags) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + flags = PICK_SPIN_OP_RET(__spin_lock_irqsave, _spin_lock_irqsave, \ + lock); \ +} while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC -#define spin_lock_irqsave_nested(lock, flags, subclass) \ - do { \ - typecheck(unsigned long, flags); \ - flags = _spin_lock_irqsave_nested(lock, subclass); \ - } while (0) -#else -#define spin_lock_irqsave_nested(lock, flags, subclass) \ - do { \ - typecheck(unsigned long, flags); \ - flags = _spin_lock_irqsave(lock); \ - } while (0) -#endif - -#else - -#define spin_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _spin_lock_irqsave(lock, flags); \ - } while (0) -#define read_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _read_lock_irqsave(lock, flags); \ - } while (0) -#define write_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - 
_write_lock_irqsave(lock, flags); \ - } while (0) -#define spin_lock_irqsave_nested(lock, flags, subclass) \ - spin_lock_irqsave(lock, flags) - -#endif - -#define spin_lock_irq(lock) _spin_lock_irq(lock) -#define spin_lock_bh(lock) _spin_lock_bh(lock) - -#define read_lock_irq(lock) _read_lock_irq(lock) -#define read_lock_bh(lock) _read_lock_bh(lock) - -#define write_lock_irq(lock) _write_lock_irq(lock) -#define write_lock_bh(lock) _write_lock_bh(lock) - -/* - * We inline the unlock functions in the nondebug case: - */ -#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \ - !defined(CONFIG_SMP) -# define spin_unlock(lock) _spin_unlock(lock) -# define read_unlock(lock) _read_unlock(lock) -# define write_unlock(lock) _write_unlock(lock) -# define spin_unlock_irq(lock) _spin_unlock_irq(lock) -# define read_unlock_irq(lock) _read_unlock_irq(lock) -# define write_unlock_irq(lock) _write_unlock_irq(lock) -#else -# define spin_unlock(lock) \ - do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0) -# define read_unlock(lock) \ - do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0) -# define write_unlock(lock) \ - do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0) -# define spin_unlock_irq(lock) \ -do { \ - __raw_spin_unlock(&(lock)->raw_lock); \ - __release(lock); \ - local_irq_enable(); \ -} while (0) -# define read_unlock_irq(lock) \ -do { \ - __raw_read_unlock(&(lock)->raw_lock); \ - __release(lock); \ - local_irq_enable(); \ -} while (0) -# define write_unlock_irq(lock) \ -do { \ - __raw_write_unlock(&(lock)->raw_lock); \ - __release(lock); \ - local_irq_enable(); \ -} while (0) -#endif - -#define spin_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _spin_unlock_irqrestore(lock, flags); \ - } while (0) -#define spin_unlock_bh(lock) _spin_unlock_bh(lock) - -#define read_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _read_unlock_irqrestore(lock, flags); \ - } while (0) -#define read_unlock_bh(lock) _read_unlock_bh(lock) - -#define write_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _write_unlock_irqrestore(lock, flags); \ - } while (0) -#define write_unlock_bh(lock) _write_unlock_bh(lock) - -#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) - -#define spin_trylock_irq(lock) \ -({ \ - local_irq_disable(); \ - spin_trylock(lock) ? 
\ - 1 : ({ local_irq_enable(); 0; }); \ -}) +# define spin_lock_irqsave_nested(lock, flags, subclass) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + flags = PICK_SPIN_OP_RET(__spin_lock_irqsave_nested, \ + _spin_lock_irqsave_nested, lock, subclass); \ +} while (0) +#else +# define spin_lock_irqsave_nested(lock, flags, subclass) \ + spin_lock_irqsave(lock, flags) +#endif + +# define read_lock_irqsave(lock, flags) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + flags = PICK_RW_OP_RET(__read_lock_irqsave, _read_lock_irqsave, lock);\ +} while (0) + +# define write_lock_irqsave(lock, flags) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + flags = PICK_RW_OP_RET(__write_lock_irqsave, _write_lock_irqsave,lock);\ +} while (0) + +#define spin_lock_irq(lock) PICK_SPIN_OP(__spin_lock_irq, _spin_lock_irq, lock) + +#define spin_lock_bh(lock) PICK_SPIN_OP(__spin_lock_bh, _spin_lock_bh, lock) + +#define read_lock_irq(lock) PICK_RW_OP(__read_lock_irq, _read_lock_irq, lock) + +#define read_lock_bh(lock) PICK_RW_OP(__read_lock_bh, _read_lock_bh, lock) + +#define write_lock_irq(lock) PICK_RW_OP(__write_lock_irq, _write_lock_irq, lock) + +#define write_lock_bh(lock) PICK_RW_OP(__write_lock_bh, _write_lock_bh, lock) + +#define spin_unlock(lock) PICK_SPIN_OP(__spin_unlock, _spin_unlock, lock) + +#define read_unlock(lock) PICK_RW_OP(__read_unlock, _read_unlock, lock) + +#define write_unlock(lock) PICK_RW_OP(__write_unlock, _write_unlock, lock) + +#define spin_unlock_no_resched(lock) \ + PICK_SPIN_OP(__spin_unlock_no_resched, _spin_unlock_no_resched, lock) + +#define spin_unlock_irqrestore(lock, flags) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + PICK_SPIN_OP(__spin_unlock_irqrestore, _spin_unlock_irqrestore, \ + lock, flags); \ +} while (0) + +#define spin_unlock_irq(lock) \ + PICK_SPIN_OP(__spin_unlock_irq, _spin_unlock_irq, lock) +#define spin_unlock_bh(lock) \ + PICK_SPIN_OP(__spin_unlock_bh, _spin_unlock_bh, lock) + +#define read_unlock_irqrestore(lock, flags) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + PICK_RW_OP(__read_unlock_irqrestore, _read_unlock_irqrestore, \ + lock, flags); \ +} while (0) + +#define read_unlock_irq(lock) \ + PICK_RW_OP(__read_unlock_irq, _read_unlock_irq, lock) +#define read_unlock_bh(lock) PICK_RW_OP(__read_unlock_bh, _read_unlock_bh, lock) + +#define write_unlock_irqrestore(lock, flags) \ +do { \ + BUILD_CHECK_IRQ_FLAGS(flags); \ + PICK_RW_OP(__write_unlock_irqrestore, _write_unlock_irqrestore, \ + lock, flags); \ +} while (0) +#define write_unlock_irq(lock) \ + PICK_RW_OP(__write_unlock_irq, _write_unlock_irq, lock) + +#define write_unlock_bh(lock) \ + PICK_RW_OP(__write_unlock_bh, _write_unlock_bh, lock) + +#define spin_trylock_bh(lock) \ + __cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock_bh, _spin_trylock_bh,\ + lock)) + +#define spin_trylock_irq(lock) \ + __cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock_irq, \ + _spin_trylock_irq, lock)) #define spin_trylock_irqsave(lock, flags) \ -({ \ - local_irq_save(flags); \ - spin_trylock(lock) ? \ - 1 : ({ local_irq_restore(flags); 0; }); \ -}) + __cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock_irqsave, \ + _spin_trylock_irqsave, lock, &flags)) -#define write_trylock_irqsave(lock, flags) \ -({ \ - local_irq_save(flags); \ - write_trylock(lock) ? \ - 1 : ({ local_irq_restore(flags); 0; }); \ -}) +/* + * bit-based spin_lock() + * + * Don't use this unless you really need to: spin_lock() and spin_unlock() + * are significantly faster. 
+ */ +static inline void bit_spin_lock(int bitnum, unsigned long *addr) +{ + /* + * Assuming the lock is uncontended, this never enters + * the body of the outer loop. If it is contended, then + * within the inner loop a non-atomic test is used to + * busywait with less bus contention for a good time to + * attempt to acquire the lock bit. + */ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) + while (test_and_set_bit(bitnum, addr)) + while (test_bit(bitnum, addr)) + cpu_relax(); +#endif + __acquire(bitlock); +} + +/* + * Return true if it was acquired + */ +static inline int bit_spin_trylock(int bitnum, unsigned long *addr) +{ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) + if (test_and_set_bit(bitnum, addr)) + return 0; +#endif + __acquire(bitlock); + return 1; +} + +/* + * bit-based spin_unlock() + */ +static inline void bit_spin_unlock(int bitnum, unsigned long *addr) +{ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) + BUG_ON(!test_bit(bitnum, addr)); + smp_mb__before_clear_bit(); + clear_bit(bitnum, addr); +#endif + __release(bitlock); +} + +/* + * Return true if the lock is held. + */ +static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) +{ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) + return test_bit(bitnum, addr); +#else + return 1; +#endif +} + +/** + * __raw_spin_can_lock - would __raw_spin_trylock() succeed? + * @lock: the spinlock in question. + */ +#define __raw_spin_can_lock(lock) (!__raw_spin_is_locked(lock)) /* * Pull the atomic_t declaration: @@ -359,14 +595,22 @@ do { \ * Decrements @atomic by 1. If the result is 0, returns true and locks * @lock. Returns false for all other cases. */ -extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); -#define atomic_dec_and_lock(atomic, lock) \ - __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) +/* "lock on reference count zero" */ +#ifndef ATOMIC_DEC_AND_LOCK +# include + extern int __atomic_dec_and_spin_lock(raw_spinlock_t *lock, atomic_t *atomic); +#endif + +#define atomic_dec_and_lock(atomic, lock) \ + __cond_lock(lock, PICK_SPIN_OP_RET(__atomic_dec_and_spin_lock, \ + _atomic_dec_and_spin_lock, lock, atomic)) /** * spin_can_lock - would spin_trylock() succeed? * @lock: the spinlock in question. 
*/ -#define spin_can_lock(lock) (!spin_is_locked(lock)) +#define spin_can_lock(lock) \ + __cond_lock(lock, PICK_SPIN_OP_RET(__spin_can_lock, _spin_can_lock,\ + lock)) #endif /* __LINUX_SPINLOCK_H */ Index: linux-2.6-tip/include/linux/spinlock_api_smp.h =================================================================== --- linux-2.6-tip.orig/include/linux/spinlock_api_smp.h +++ linux-2.6-tip/include/linux/spinlock_api_smp.h @@ -19,45 +19,60 @@ int in_lock_functions(unsigned long addr #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) -void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) - __acquires(lock); void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) __acquires(lock); -void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); -void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock); -void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock); -void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock); -void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock); -unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) - __acquires(lock); -unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) - __acquires(lock); -unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) - __acquires(lock); -unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) - __acquires(lock); -int __lockfunc _spin_trylock(spinlock_t *lock); -int __lockfunc _read_trylock(rwlock_t *lock); -int __lockfunc _write_trylock(rwlock_t *lock); -int __lockfunc _spin_trylock_bh(spinlock_t *lock); -void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock); -void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock); -void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock); -void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock); -void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock); -void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) - __releases(lock); -void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) - __releases(lock); -void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) - __releases(lock); +#define ACQUIRE_SPIN __acquires(lock) +#define ACQUIRE_RW __acquires(lock) +#define RELEASE_SPIN __releases(lock) +#define RELEASE_RW __releases(lock) + +void __lockfunc __spin_lock(raw_spinlock_t *lock) ACQUIRE_SPIN; +void __lockfunc __spin_lock_nested(raw_spinlock_t *lock, int subclass) + ACQUIRE_SPIN; +void __lockfunc __read_lock(raw_rwlock_t *lock) ACQUIRE_RW; +void __lockfunc __write_lock(raw_rwlock_t *lock) ACQUIRE_RW; +void __lockfunc __spin_lock_bh(raw_spinlock_t *lock) ACQUIRE_SPIN; +void __lockfunc __read_lock_bh(raw_rwlock_t *lock) ACQUIRE_RW; +void __lockfunc __write_lock_bh(raw_rwlock_t *lock) ACQUIRE_RW; +void __lockfunc __spin_lock_irq(raw_spinlock_t *lock) ACQUIRE_SPIN; +void __lockfunc __read_lock_irq(raw_rwlock_t *lock) ACQUIRE_RW; +void __lockfunc 
__write_lock_irq(raw_rwlock_t *lock) ACQUIRE_RW; +unsigned long __lockfunc __spin_lock_irqsave(raw_spinlock_t *lock) + ACQUIRE_SPIN; +unsigned long __lockfunc +__spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) ACQUIRE_SPIN; +unsigned long __lockfunc __read_lock_irqsave(raw_rwlock_t *lock) + ACQUIRE_RW; +unsigned long __lockfunc __write_lock_irqsave(raw_rwlock_t *lock) + ACQUIRE_RW; +int __lockfunc __spin_trylock(raw_spinlock_t *lock); +int __lockfunc +__spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags); +int __lockfunc __read_trylock(raw_rwlock_t *lock); +int __lockfunc __write_trylock(raw_rwlock_t *lock); +int __lockfunc +__write_trylock_irqsave(raw_rwlock_t *lock, unsigned long *flags); +int __lockfunc __spin_trylock_bh(raw_spinlock_t *lock); +int __lockfunc __spin_trylock_irq(raw_spinlock_t *lock); +void __lockfunc __spin_unlock(raw_spinlock_t *lock) RELEASE_SPIN; +void __lockfunc __spin_unlock_no_resched(raw_spinlock_t *lock) + RELEASE_SPIN; +void __lockfunc __read_unlock(raw_rwlock_t *lock) RELEASE_RW; +void __lockfunc __write_unlock(raw_rwlock_t *lock) RELEASE_RW; +void __lockfunc __spin_unlock_bh(raw_spinlock_t *lock) RELEASE_SPIN; +void __lockfunc __read_unlock_bh(raw_rwlock_t *lock) RELEASE_RW; +void __lockfunc __write_unlock_bh(raw_rwlock_t *lock) RELEASE_RW; +void __lockfunc __spin_unlock_irq(raw_spinlock_t *lock) RELEASE_SPIN; +void __lockfunc __read_unlock_irq(raw_rwlock_t *lock) RELEASE_RW; +void __lockfunc __write_unlock_irq(raw_rwlock_t *lock) RELEASE_RW; +void __lockfunc +__spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) + RELEASE_SPIN; +void __lockfunc +__read_unlock_irqrestore(raw_rwlock_t *lock, unsigned long flags) + RELEASE_RW; +void +__lockfunc __write_unlock_irqrestore(raw_rwlock_t *lock, unsigned long flags) + RELEASE_RW; #endif /* __LINUX_SPINLOCK_API_SMP_H */ Index: linux-2.6-tip/include/linux/spinlock_api_up.h =================================================================== --- linux-2.6-tip.orig/include/linux/spinlock_api_up.h +++ linux-2.6-tip/include/linux/spinlock_api_up.h @@ -33,12 +33,22 @@ #define __LOCK_IRQ(lock) \ do { local_irq_disable(); __LOCK(lock); } while (0) -#define __LOCK_IRQSAVE(lock, flags) \ - do { local_irq_save(flags); __LOCK(lock); } while (0) +#define __LOCK_IRQSAVE(lock) \ + ({ unsigned long __flags; local_irq_save(__flags); __LOCK(lock); __flags; }) + +#define __TRYLOCK_IRQSAVE(lock, flags) \ + ({ local_irq_save(*(flags)); __LOCK(lock); 1; }) + +#define __spin_trylock_irqsave(lock, flags) __TRYLOCK_IRQSAVE(lock, flags) + +#define __write_trylock_irqsave(lock, flags) __TRYLOCK_IRQSAVE(lock, flags) #define __UNLOCK(lock) \ do { preempt_enable(); __release(lock); (void)(lock); } while (0) +#define __UNLOCK_NO_RESCHED(lock) \ + do { __preempt_enable_no_resched(); __release(lock); (void)(lock); } while (0) + #define __UNLOCK_BH(lock) \ do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) @@ -48,34 +58,36 @@ #define __UNLOCK_IRQRESTORE(lock, flags) \ do { local_irq_restore(flags); __UNLOCK(lock); } while (0) -#define _spin_lock(lock) __LOCK(lock) -#define _spin_lock_nested(lock, subclass) __LOCK(lock) -#define _read_lock(lock) __LOCK(lock) -#define _write_lock(lock) __LOCK(lock) -#define _spin_lock_bh(lock) __LOCK_BH(lock) -#define _read_lock_bh(lock) __LOCK_BH(lock) -#define _write_lock_bh(lock) __LOCK_BH(lock) -#define _spin_lock_irq(lock) __LOCK_IRQ(lock) -#define _read_lock_irq(lock) __LOCK_IRQ(lock) -#define _write_lock_irq(lock) __LOCK_IRQ(lock) 
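(Illustrative aside, not part of the hunk: with the UP stubs renamed and __LOCK_IRQSAVE() turned into a statement expression, the irqsave variants now hand the saved flags back by value, so the generic flags = PICK_SPIN_OP_RET(...) wrappers in spinlock.h work identically on UP and SMP. Roughly, assuming the unchanged __LOCK() still just disables preemption:

	unsigned long flags;

	/* approximate UP expansion of spin_lock_irqsave(&lock, flags): */
	flags = ({ unsigned long __flags;
		   local_irq_save(__flags);
		   preempt_disable();		/* from __LOCK(lock) */
		   __flags; });

	/* ... critical section ... */

	/* spin_unlock_irqrestore(&lock, flags), via __UNLOCK_IRQRESTORE(): */
	local_irq_restore(flags);
	preempt_enable();			/* from __UNLOCK(lock) */
)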
-#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) -#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) -#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) -#define _spin_trylock(lock) ({ __LOCK(lock); 1; }) -#define _read_trylock(lock) ({ __LOCK(lock); 1; }) -#define _write_trylock(lock) ({ __LOCK(lock); 1; }) -#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) -#define _spin_unlock(lock) __UNLOCK(lock) -#define _read_unlock(lock) __UNLOCK(lock) -#define _write_unlock(lock) __UNLOCK(lock) -#define _spin_unlock_bh(lock) __UNLOCK_BH(lock) -#define _write_unlock_bh(lock) __UNLOCK_BH(lock) -#define _read_unlock_bh(lock) __UNLOCK_BH(lock) -#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) -#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) -#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) -#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) -#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) -#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) +#define __spin_lock(lock) __LOCK(lock) +#define __spin_lock_nested(lock, subclass) __LOCK(lock) +#define __read_lock(lock) __LOCK(lock) +#define __write_lock(lock) __LOCK(lock) +#define __spin_lock_bh(lock) __LOCK_BH(lock) +#define __read_lock_bh(lock) __LOCK_BH(lock) +#define __write_lock_bh(lock) __LOCK_BH(lock) +#define __spin_lock_irq(lock) __LOCK_IRQ(lock) +#define __read_lock_irq(lock) __LOCK_IRQ(lock) +#define __write_lock_irq(lock) __LOCK_IRQ(lock) +#define __spin_lock_irqsave(lock) __LOCK_IRQSAVE(lock) +#define __read_lock_irqsave(lock) __LOCK_IRQSAVE(lock) +#define __write_lock_irqsave(lock) __LOCK_IRQSAVE(lock) +#define __spin_trylock(lock) ({ __LOCK(lock); 1; }) +#define __read_trylock(lock) ({ __LOCK(lock); 1; }) +#define __write_trylock(lock) ({ __LOCK(lock); 1; }) +#define __spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) +#define __spin_trylock_irq(lock) ({ __LOCK_IRQ(lock); 1; }) +#define __spin_unlock(lock) __UNLOCK(lock) +#define __spin_unlock_no_resched(lock) __UNLOCK_NO_RESCHED(lock) +#define __read_unlock(lock) __UNLOCK(lock) +#define __write_unlock(lock) __UNLOCK(lock) +#define __spin_unlock_bh(lock) __UNLOCK_BH(lock) +#define __write_unlock_bh(lock) __UNLOCK_BH(lock) +#define __read_unlock_bh(lock) __UNLOCK_BH(lock) +#define __spin_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define __read_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define __write_unlock_irq(lock) __UNLOCK_IRQ(lock) +#define __spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) +#define __read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) +#define __write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) #endif /* __LINUX_SPINLOCK_API_UP_H */ Index: linux-2.6-tip/include/linux/spinlock_types.h =================================================================== --- linux-2.6-tip.orig/include/linux/spinlock_types.h +++ linux-2.6-tip/include/linux/spinlock_types.h @@ -15,10 +15,27 @@ # include #endif +/* + * Must define these before including other files, inline functions need them + */ +#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME + +#define LOCK_SECTION_START(extra) \ + ".subsection 1\n\t" \ + extra \ + ".ifndef " LOCK_SECTION_NAME "\n\t" \ + LOCK_SECTION_NAME ":\n\t" \ + ".endif\n" + +#define LOCK_SECTION_END \ + ".previous\n\t" + +#define __lockfunc __attribute__((section(".spinlock.text"))) + #include typedef struct { - raw_spinlock_t raw_lock; + __raw_spinlock_t raw_lock; #ifdef 
CONFIG_GENERIC_LOCKBREAK unsigned int break_lock; #endif @@ -29,12 +46,12 @@ typedef struct { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif -} spinlock_t; +} raw_spinlock_t; #define SPINLOCK_MAGIC 0xdead4ead typedef struct { - raw_rwlock_t raw_lock; + __raw_rwlock_t raw_lock; #ifdef CONFIG_GENERIC_LOCKBREAK unsigned int break_lock; #endif @@ -45,7 +62,7 @@ typedef struct { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif -} rwlock_t; +} raw_rwlock_t; #define RWLOCK_MAGIC 0xdeaf1eed @@ -64,24 +81,24 @@ typedef struct { #endif #ifdef CONFIG_DEBUG_SPINLOCK -# define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ +# define _RAW_SPIN_LOCK_UNLOCKED(lockname) \ + { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ .magic = SPINLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1, \ SPIN_DEP_MAP_INIT(lockname) } -#define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ +#define _RAW_RW_LOCK_UNLOCKED(lockname) \ + { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ .magic = RWLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1, \ RW_DEP_MAP_INIT(lockname) } #else -# define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ +# define _RAW_SPIN_LOCK_UNLOCKED(lockname) \ + { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ SPIN_DEP_MAP_INIT(lockname) } -#define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ +# define _RAW_RW_LOCK_UNLOCKED(lockname) \ + { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ RW_DEP_MAP_INIT(lockname) } #endif @@ -91,10 +108,22 @@ typedef struct { * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate. */ -#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) -#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) -#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) +# define RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) _RAW_SPIN_LOCK_UNLOCKED(lockname) + +# define RAW_RW_LOCK_UNLOCKED(lockname) \ + (raw_rwlock_t) _RAW_RW_LOCK_UNLOCKED(lockname) + +#define DEFINE_RAW_SPINLOCK(name) \ + raw_spinlock_t name __cacheline_aligned_in_smp = \ + RAW_SPIN_LOCK_UNLOCKED(name) + +#define __DEFINE_RAW_SPINLOCK(name) \ + raw_spinlock_t name = RAW_SPIN_LOCK_UNLOCKED(name) + +#define DEFINE_RAW_RWLOCK(name) \ + raw_rwlock_t name __cacheline_aligned_in_smp = \ + RAW_RW_LOCK_UNLOCKED(name) #endif /* __LINUX_SPINLOCK_TYPES_H */ Index: linux-2.6-tip/include/linux/spinlock_types_up.h =================================================================== --- linux-2.6-tip.orig/include/linux/spinlock_types_up.h +++ linux-2.6-tip/include/linux/spinlock_types_up.h @@ -16,13 +16,13 @@ typedef struct { volatile unsigned int slock; -} raw_spinlock_t; +} __raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } #else -typedef struct { } raw_spinlock_t; +typedef struct { } __raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { } @@ -30,7 +30,7 @@ typedef struct { } raw_spinlock_t; typedef struct { /* no debug version on UP */ -} raw_rwlock_t; +} __raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { } Index: linux-2.6-tip/include/linux/spinlock_up.h =================================================================== --- linux-2.6-tip.orig/include/linux/spinlock_up.h +++ linux-2.6-tip/include/linux/spinlock_up.h @@ -20,19 +20,19 @@ #ifdef CONFIG_DEBUG_SPINLOCK #define __raw_spin_is_locked(x) 
((x)->slock == 0) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(__raw_spinlock_t *lock) { lock->slock = 0; } static inline void -__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +__raw_spin_lock_flags(__raw_spinlock_t *lock, unsigned long flags) { local_irq_save(flags); lock->slock = 0; } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(__raw_spinlock_t *lock) { char oldval = lock->slock; @@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw return oldval > 0; } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(__raw_spinlock_t *lock) { lock->slock = 1; } Index: linux-2.6-tip/kernel/Makefile =================================================================== --- linux-2.6-tip.orig/kernel/Makefile +++ linux-2.6-tip/kernel/Makefile @@ -7,7 +7,7 @@ obj-y = sched.o fork.o exec_domain.o sysctl.o capability.o ptrace.o timer.o user.o \ signal.o sys.o kmod.o workqueue.o pid.o \ rcupdate.o extable.o params.o posix-timers.o \ - kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ + kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \ hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ async.o @@ -27,7 +27,10 @@ obj-$(CONFIG_PROFILING) += profile.o obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ +ifneq ($(CONFIG_PREEMPT_RT),y) +obj-y += mutex.o obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o +endif obj-$(CONFIG_LOCKDEP) += lockdep.o ifeq ($(CONFIG_PROC_FS),y) obj-$(CONFIG_LOCKDEP) += lockdep_proc.o @@ -39,6 +42,7 @@ endif obj-$(CONFIG_RT_MUTEXES) += rtmutex.o obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o +obj-$(CONFIG_PREEMPT_RT) += rt.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o ifneq ($(CONFIG_SMP),y) Index: linux-2.6-tip/kernel/fork.c =================================================================== --- linux-2.6-tip.orig/kernel/fork.c +++ linux-2.6-tip/kernel/fork.c @@ -80,7 +80,11 @@ int max_threads; /* tunable limit on nr DEFINE_PER_CPU(unsigned long, process_counts) = 0; +#ifdef CONFIG_PREEMPT_RT +DEFINE_RWLOCK(tasklist_lock); /* outer */ +#else __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ +#endif DEFINE_TRACE(sched_process_fork); @@ -921,6 +925,9 @@ static void rt_mutex_init_task(struct ta #ifdef CONFIG_RT_MUTEXES plist_head_init(&p->pi_waiters, &p->pi_lock); p->pi_blocked_on = NULL; +# ifdef CONFIG_DEBUG_RT_MUTEXES + p->last_kernel_lock = NULL; +# endif #endif } @@ -1127,6 +1134,9 @@ static struct task_struct *copy_process( retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs); if (retval) goto bad_fork_cleanup_io; +#ifdef CONFIG_DEBUG_PREEMPT + p->lock_count = 0; +#endif if (pid != &init_struct_pid) { retval = -ENOMEM; Index: linux-2.6-tip/kernel/futex.c =================================================================== --- linux-2.6-tip.orig/kernel/futex.c +++ linux-2.6-tip/kernel/futex.c @@ -1983,7 +1983,11 @@ static int __init futex_init(void) futex_cmpxchg_enabled = 1; for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { +#ifdef CONFIG_PREEMPT_RT + plist_head_init(&futex_queues[i].chain, NULL); +#else plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock); +#endif spin_lock_init(&futex_queues[i].lock); } Index: linux-2.6-tip/kernel/lockdep.c 
=================================================================== --- linux-2.6-tip.orig/kernel/lockdep.c +++ linux-2.6-tip/kernel/lockdep.c @@ -70,7 +70,7 @@ module_param(lock_stat, int, 0644); * to use a raw spinlock - we really dont want the spinlock * code to recurse back into the lockdep code... */ -static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static __raw_spinlock_t lockdep_lock = (__raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; static int graph_lock(void) { Index: linux-2.6-tip/kernel/rt.c =================================================================== --- /dev/null +++ linux-2.6-tip/kernel/rt.c @@ -0,0 +1,634 @@ +/* + * kernel/rt.c + * + * Real-Time Preemption Support + * + * started by Ingo Molnar: + * + * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner + * + * historic credit for proving that Linux spinlocks can be implemented via + * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow + * and others) who prototyped it on 2.4 and did lots of comparative + * research and analysis; TimeSys, for proving that you can implement a + * fully preemptible kernel via the use of IRQ threading and mutexes; + * Bill Huey for persuasively arguing on lkml that the mutex model is the + * right one; and to MontaVista, who ported pmutexes to 2.6. + * + * This code is a from-scratch implementation and is not based on pmutexes, + * but the idea of converting spinlocks to mutexes is used here too. + * + * lock debugging, locking tree, deadlock detection: + * + * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey + * Released under the General Public License (GPL). + * + * Includes portions of the generic R/W semaphore implementation from: + * + * Copyright (c) 2001 David Howells (dhowells@redhat.com). + * - Derived partially from idea by Andrea Arcangeli + * - Derived also from comments by Linus + * + * Pending ownership of locks and ownership stealing: + * + * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt + * + * (also by Steven Rostedt) + * - Converted single pi_lock to individual task locks. + * + * By Esben Nielsen: + * Doing priority inheritance with help of the scheduler. + * + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner + * - major rework based on Esben Nielsens initial patch + * - replaced thread_info references by task_struct refs + * - removed task->pending_owner dependency + * - BKL drop/reacquire for semaphore style locks to avoid deadlocks + * in the scheduler return path as discussed with Steven Rostedt + * + * Copyright (C) 2006, Kihon Technologies Inc. + * Steven Rostedt + * - debugged and patched Thomas Gleixner's rework. + * - added back the cmpxchg to the rework. + * - turned atomic require back on for SMP. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rtmutex_common.h" + +#ifdef CONFIG_PREEMPT_RT +/* + * Unlock these on crash: + */ +void zap_rt_locks(void) +{ + //trace_lock_init(); +} +#endif + +/* + * struct mutex functions + */ +void _mutex_init(struct mutex *lock, char *name, struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key, 0); +#endif + __rt_mutex_init(&lock->lock, name); +} +EXPORT_SYMBOL(_mutex_init); + +void __lockfunc _mutex_lock(struct mutex *lock) +{ + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + rt_mutex_lock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_lock); + +int __lockfunc _mutex_lock_interruptible(struct mutex *lock) +{ + int ret; + + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + ret = rt_mutex_lock_interruptible(&lock->lock, 0); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_interruptible); + +int __lockfunc _mutex_lock_killable(struct mutex *lock) +{ + int ret; + + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + ret = rt_mutex_lock_killable(&lock->lock, 0); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_killable); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) +{ + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + rt_mutex_lock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_lock_nested); + +int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) +{ + int ret; + + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + ret = rt_mutex_lock_interruptible(&lock->lock, 0); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_interruptible_nested); + +int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) +{ + int ret; + + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + ret = rt_mutex_lock_killable(&lock->lock, 0); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_killable_nested); +#endif + +int __lockfunc _mutex_trylock(struct mutex *lock) +{ + int ret = rt_mutex_trylock(&lock->lock); + + if (ret) + mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(_mutex_trylock); + +void __lockfunc _mutex_unlock(struct mutex *lock) +{ + mutex_release(&lock->dep_map, 1, _RET_IP_); + rt_mutex_unlock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_unlock); + +/* + * rwlock_t functions + */ +int __lockfunc rt_write_trylock(rwlock_t *rwlock) +{ + int ret = rt_mutex_trylock(&rwlock->lock); + + if (ret) + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(rt_write_trylock); + +int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) +{ + *flags = 0; + return rt_write_trylock(rwlock); +} +EXPORT_SYMBOL(rt_write_trylock_irqsave); + +int __lockfunc rt_read_trylock(rwlock_t *rwlock) +{ + struct rt_mutex *lock = &rwlock->lock; + unsigned long flags; + int ret; + + /* + * Read locks within the self-held write lock succeed. 
+ */ + spin_lock_irqsave(&lock->wait_lock, flags); + if (rt_mutex_real_owner(lock) == current) { + spin_unlock_irqrestore(&lock->wait_lock, flags); + rwlock->read_depth++; + rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); + return 1; + } + spin_unlock_irqrestore(&lock->wait_lock, flags); + + ret = rt_mutex_trylock(lock); + if (ret) + rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(rt_read_trylock); + +void __lockfunc rt_write_lock(rwlock_t *rwlock) +{ + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); + __rt_spin_lock(&rwlock->lock); +} +EXPORT_SYMBOL(rt_write_lock); + +void __lockfunc rt_read_lock(rwlock_t *rwlock) +{ + unsigned long flags; + struct rt_mutex *lock = &rwlock->lock; + + rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); + /* + * Read locks within the write lock succeed. + */ + spin_lock_irqsave(&lock->wait_lock, flags); + if (rt_mutex_real_owner(lock) == current) { + spin_unlock_irqrestore(&lock->wait_lock, flags); + rwlock->read_depth++; + return; + } + spin_unlock_irqrestore(&lock->wait_lock, flags); + __rt_spin_lock(lock); +} + +EXPORT_SYMBOL(rt_read_lock); + +void __lockfunc rt_write_unlock(rwlock_t *rwlock) +{ + /* NOTE: we always pass in '1' for nested, for simplicity */ + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + __rt_spin_unlock(&rwlock->lock); +} +EXPORT_SYMBOL(rt_write_unlock); + +void __lockfunc rt_read_unlock(rwlock_t *rwlock) +{ + struct rt_mutex *lock = &rwlock->lock; + unsigned long flags; + + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + // TRACE_WARN_ON(lock->save_state != 1); + /* + * Read locks within the self-held write lock succeed. + */ + spin_lock_irqsave(&lock->wait_lock, flags); + if (rt_mutex_real_owner(lock) == current && rwlock->read_depth) { + spin_unlock_irqrestore(&lock->wait_lock, flags); + rwlock->read_depth--; + return; + } + spin_unlock_irqrestore(&lock->wait_lock, flags); + __rt_spin_unlock(&rwlock->lock); +} +EXPORT_SYMBOL(rt_read_unlock); + +unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock) +{ + rt_write_lock(rwlock); + + return 0; +} +EXPORT_SYMBOL(rt_write_lock_irqsave); + +unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock) +{ + rt_read_lock(rwlock); + + return 0; +} +EXPORT_SYMBOL(rt_read_lock_irqsave); + +void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock)); + lockdep_init_map(&rwlock->dep_map, name, key, 0); +#endif + __rt_mutex_init(&rwlock->lock, name); + rwlock->read_depth = 0; +} +EXPORT_SYMBOL(__rt_rwlock_init); + +/* + * rw_semaphores + */ + +void rt_up_write(struct rw_semaphore *rwsem) +{ + rwsem_release(&rwsem->dep_map, 1, _RET_IP_); + rt_mutex_unlock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_up_write); + +void rt_up_read(struct rw_semaphore *rwsem) +{ + unsigned long flags; + + rwsem_release(&rwsem->dep_map, 1, _RET_IP_); + /* + * Read locks within the self-held write lock succeed. 
+ */ + spin_lock_irqsave(&rwsem->lock.wait_lock, flags); + if (rt_mutex_real_owner(&rwsem->lock) == current && rwsem->read_depth) { + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rwsem->read_depth--; + return; + } + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rt_mutex_unlock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_up_read); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void rt_up_read_non_owner(struct rw_semaphore *rwsem) +{ + unsigned long flags; + /* + * Read locks within the self-held write lock succeed. + */ + spin_lock_irqsave(&rwsem->lock.wait_lock, flags); + if (rt_mutex_real_owner(&rwsem->lock) == current && rwsem->read_depth) { + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rwsem->read_depth--; + return; + } + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rt_mutex_unlock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_up_read_non_owner); +#endif + +/* + * downgrade a write lock into a read lock + * - just wake up any readers at the front of the queue + */ +void rt_downgrade_write(struct rw_semaphore *rwsem) +{ + BUG(); +} +EXPORT_SYMBOL(rt_downgrade_write); + +int rt_down_write_trylock(struct rw_semaphore *rwsem) +{ + int ret = rt_mutex_trylock(&rwsem->lock); + + if (ret) + rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(rt_down_write_trylock); + +void rt_down_write(struct rw_semaphore *rwsem) +{ + rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_); + rt_mutex_lock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_down_write); + +void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass) +{ + rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); + rt_mutex_lock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_down_write_nested); + +int rt_down_read_trylock(struct rw_semaphore *rwsem) +{ + unsigned long flags; + int ret; + + /* + * Read locks within the self-held write lock succeed. + */ + spin_lock_irqsave(&rwsem->lock.wait_lock, flags); + if (rt_mutex_real_owner(&rwsem->lock) == current) { + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rwsem_acquire_read(&rwsem->dep_map, 0, 1, _RET_IP_); + rwsem->read_depth++; + return 1; + } + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + + ret = rt_mutex_trylock(&rwsem->lock); + if (ret) + rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(rt_down_read_trylock); + +static void __rt_down_read(struct rw_semaphore *rwsem, int subclass) +{ + unsigned long flags; + + rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); + + /* + * Read locks within the write lock succeed. + */ + spin_lock_irqsave(&rwsem->lock.wait_lock, flags); + + if (rt_mutex_real_owner(&rwsem->lock) == current) { + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rwsem->read_depth++; + return; + } + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rt_mutex_lock(&rwsem->lock); +} + +void rt_down_read(struct rw_semaphore *rwsem) +{ + __rt_down_read(rwsem, 0); +} +EXPORT_SYMBOL(rt_down_read); + +void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass) +{ + __rt_down_read(rwsem, subclass); +} +EXPORT_SYMBOL(rt_down_read_nested); + + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +/* + * Same as rt_down_read() but no lockdep calls: + */ +void rt_down_read_non_owner(struct rw_semaphore *rwsem) +{ + unsigned long flags; + /* + * Read locks within the write lock succeed. 
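
As an aside, the read_depth bookkeeping used throughout these rwsem/rwlock paths -- a reader that already owns the write side just bumps a per-lock counter instead of blocking on itself -- can be sketched in plain C with POSIX threads. This is an illustrative userspace model only; demo_rwsem and its explicit owner field are inventions of the sketch (the real code reads the owner out of the rt_mutex and guards the check with lock->wait_lock).

#include <pthread.h>

/* Illustrative model of the read-within-own-write-lock rule (not kernel code).
 * Both mutexes are assumed to start as PTHREAD_MUTEX_INITIALIZER. */
struct demo_rwsem {
	pthread_mutex_t wait_lock;   /* guards owner/read_depth, like lock->wait_lock */
	pthread_mutex_t lock;        /* the blocking lock, standing in for the rt_mutex */
	pthread_t       owner;
	int             write_held;
	int             read_depth;  /* recursive reads taken by the write owner */
};

static void demo_down_write(struct demo_rwsem *s)
{
	pthread_mutex_lock(&s->lock);
	pthread_mutex_lock(&s->wait_lock);
	s->owner = pthread_self();
	s->write_held = 1;
	pthread_mutex_unlock(&s->wait_lock);
}

static void demo_up_write(struct demo_rwsem *s)
{
	pthread_mutex_lock(&s->wait_lock);
	s->write_held = 0;
	pthread_mutex_unlock(&s->wait_lock);
	pthread_mutex_unlock(&s->lock);
}

static void demo_down_read(struct demo_rwsem *s)
{
	pthread_mutex_lock(&s->wait_lock);
	if (s->write_held && pthread_equal(s->owner, pthread_self())) {
		s->read_depth++;             /* read inside our own write lock */
		pthread_mutex_unlock(&s->wait_lock);
		return;
	}
	pthread_mutex_unlock(&s->wait_lock);
	pthread_mutex_lock(&s->lock);        /* ordinary contended path */
}

static void demo_up_read(struct demo_rwsem *s)
{
	pthread_mutex_lock(&s->wait_lock);
	if (s->write_held && pthread_equal(s->owner, pthread_self()) && s->read_depth) {
		s->read_depth--;
		pthread_mutex_unlock(&s->wait_lock);
		return;
	}
	pthread_mutex_unlock(&s->wait_lock);
	pthread_mutex_unlock(&s->lock);
}
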
+ */ + spin_lock_irqsave(&rwsem->lock.wait_lock, flags); + + if (rt_mutex_real_owner(&rwsem->lock) == current) { + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rwsem->read_depth++; + return; + } + spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags); + rt_mutex_lock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_down_read_non_owner); + +#endif + +void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem)); + lockdep_init_map(&rwsem->dep_map, name, key, 0); +#endif + __rt_mutex_init(&rwsem->lock, name); + rwsem->read_depth = 0; +} +EXPORT_SYMBOL(__rt_rwsem_init); + +/* + * Semaphores + */ +/* + * Linux Semaphores implemented via RT-mutexes. + * + * In the down() variants we use the mutex as the semaphore blocking + * object: we always acquire it, decrease the counter and keep the lock + * locked if we did the 1->0 transition. The next down() will then block. + * + * In the up() path we atomically increase the counter and do the + * unlock if we were the one doing the 0->1 transition. + */ + +static inline void __down_complete(struct semaphore *sem) +{ + int count = atomic_dec_return(&sem->count); + + if (unlikely(count > 0)) + rt_mutex_unlock(&sem->lock); +} + +void rt_down(struct semaphore *sem) +{ + rt_mutex_lock(&sem->lock); + __down_complete(sem); +} +EXPORT_SYMBOL(rt_down); + +int rt_down_interruptible(struct semaphore *sem) +{ + int ret; + + ret = rt_mutex_lock_interruptible(&sem->lock, 0); + if (ret) + return ret; + __down_complete(sem); + return 0; +} +EXPORT_SYMBOL(rt_down_interruptible); + +int rt_down_timeout(struct semaphore *sem, long jiff) +{ + struct hrtimer_sleeper t; + struct timespec ts; + unsigned long expires = jiffies + jiff + 1; + int ret; + + /* + * rt_mutex_slowlock can use an interruptible, but this needs to + * be TASK_INTERRUPTIBLE. The down_timeout uses TASK_UNINTERRUPTIBLE. + * To handle this we loop if a signal caused the timeout and the + * we recalculate the new timeout. + * Yes Thomas, this is a hack! But we can fix it right later. + */ + do { + jiffies_to_timespec(jiff, &ts); + hrtimer_init_on_stack(&t.timer, HRTIMER_MODE_REL, CLOCK_MONOTONIC); + t.timer.expires = timespec_to_ktime(ts); + + ret = rt_mutex_timed_lock(&sem->lock, &t, 0); + if (ret != -EINTR) + break; + + /* signal occured, but the down_timeout doesn't handle them */ + jiff = expires - jiffies; + + } while (jiff > 0); + + if (!ret) + __down_complete(sem); + else + ret = -ETIME; + + return ret; +} +EXPORT_SYMBOL(rt_down_timeout); + +/* + * try to down the semaphore, 0 on success and 1 on failure. (inverted) + */ +int rt_down_trylock(struct semaphore *sem) +{ + /* + * Here we are a tiny bit different from ordinary Linux semaphores, + * because we can get 'transient' locking-failures when say a + * process decreases the count from 9 to 8 and locks/releases the + * embedded mutex internally. 
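
For illustration, the counting scheme described in the comment above -- keep the blocking object held across the 1->0 transition in down(), release it on the 0->1 transition in up() -- can be modelled in userspace C11. The demo_* names are invented; a POSIX binary semaphore stands in for the rt_mutex because a pthread mutex may only be unlocked by its owner, which up() deliberately does not guarantee.

#include <semaphore.h>
#include <stdatomic.h>

/* Userspace model of the count-plus-blocking-object scheme (not kernel code). */
struct demo_sem {
	sem_t      blocker;   /* stands in for sem->lock; value 0 = "held" */
	atomic_int count;     /* stands in for sem->count                  */
};

static void demo_sema_init(struct demo_sem *s, int val)
{
	sem_init(&s->blocker, 0, val > 0 ? 1 : 0);   /* start "held" for val == 0 */
	atomic_init(&s->count, val);
}

static void demo_down(struct demo_sem *s)
{
	sem_wait(&s->blocker);                       /* acquire the blocking object */
	if (atomic_fetch_sub(&s->count, 1) - 1 > 0)  /* not the 1 -> 0 transition?  */
		sem_post(&s->blocker);               /* then let the next down() in */
}

static void demo_up(struct demo_sem *s)
{
	if (atomic_fetch_add(&s->count, 1) + 1 == 1) /* we did the 0 -> 1 transition */
		sem_post(&s->blocker);               /* release the blocking object  */
}

A demo_down_trylock() would mirror rt_down_trylock(): sem_trywait() on the blocker followed by the same count handling, accepting the transient failures the comment above mentions.
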
It would be quite complex to remove + * these transient failures so lets try it the simple way first: + */ + if (rt_mutex_trylock(&sem->lock)) { + __down_complete(sem); + return 0; + } + return 1; +} +EXPORT_SYMBOL(rt_down_trylock); + +void rt_up(struct semaphore *sem) +{ + int count; + + /* + * Disable preemption to make sure a highprio trylock-er cannot + * preempt us here and get into an infinite loop: + */ + preempt_disable(); + count = atomic_inc_return(&sem->count); + /* + * If we did the 0 -> 1 transition then we are the ones to unlock it: + */ + if (likely(count == 1)) + rt_mutex_unlock(&sem->lock); + preempt_enable(); +} +EXPORT_SYMBOL(rt_up); + +void __sema_init(struct semaphore *sem, int val, + char *name, char *file, int line) +{ + atomic_set(&sem->count, val); + switch (val) { + case 0: + __rt_mutex_init(&sem->lock, name); + rt_mutex_lock(&sem->lock); + break; + default: + __rt_mutex_init(&sem->lock, name); + break; + } +} +EXPORT_SYMBOL(__sema_init); + +void __init_MUTEX(struct semaphore *sem, char *name, char *file, + int line) +{ + __sema_init(sem, 1, name, file, line); +} +EXPORT_SYMBOL(__init_MUTEX); + Index: linux-2.6-tip/kernel/rtmutex-debug.c =================================================================== --- linux-2.6-tip.orig/kernel/rtmutex-debug.c +++ linux-2.6-tip/kernel/rtmutex-debug.c @@ -16,6 +16,7 @@ * * See rt.c in preempt-rt for proper credits and further information */ +#include #include #include #include @@ -29,61 +30,6 @@ #include "rtmutex_common.h" -# define TRACE_WARN_ON(x) WARN_ON(x) -# define TRACE_BUG_ON(x) BUG_ON(x) - -# define TRACE_OFF() \ -do { \ - if (rt_trace_on) { \ - rt_trace_on = 0; \ - console_verbose(); \ - if (spin_is_locked(¤t->pi_lock)) \ - spin_unlock(¤t->pi_lock); \ - } \ -} while (0) - -# define TRACE_OFF_NOLOCK() \ -do { \ - if (rt_trace_on) { \ - rt_trace_on = 0; \ - console_verbose(); \ - } \ -} while (0) - -# define TRACE_BUG_LOCKED() \ -do { \ - TRACE_OFF(); \ - BUG(); \ -} while (0) - -# define TRACE_WARN_ON_LOCKED(c) \ -do { \ - if (unlikely(c)) { \ - TRACE_OFF(); \ - WARN_ON(1); \ - } \ -} while (0) - -# define TRACE_BUG_ON_LOCKED(c) \ -do { \ - if (unlikely(c)) \ - TRACE_BUG_LOCKED(); \ -} while (0) - -#ifdef CONFIG_SMP -# define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c) -#else -# define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0) -#endif - -/* - * deadlock detection flag. 
We turn it off when we detect - * the first problem because we dont want to recurse back - * into the tracing code when doing error printk or - * executing a BUG(): - */ -static int rt_trace_on = 1; - static void printk_task(struct task_struct *p) { if (p) @@ -111,8 +57,8 @@ static void printk_lock(struct rt_mutex void rt_mutex_debug_task_free(struct task_struct *task) { - WARN_ON(!plist_head_empty(&task->pi_waiters)); - WARN_ON(task->pi_blocked_on); + DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters)); + DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); } /* @@ -125,7 +71,7 @@ void debug_rt_mutex_deadlock(int detect, { struct task_struct *task; - if (!rt_trace_on || detect || !act_waiter) + if (!debug_locks || detect || !act_waiter) return; task = rt_mutex_owner(act_waiter->lock); @@ -139,7 +85,7 @@ void debug_rt_mutex_print_deadlock(struc { struct task_struct *task; - if (!waiter->deadlock_lock || !rt_trace_on) + if (!waiter->deadlock_lock || !debug_locks) return; rcu_read_lock(); @@ -149,7 +95,8 @@ void debug_rt_mutex_print_deadlock(struc return; } - TRACE_OFF_NOLOCK(); + if (!debug_locks_off()) + return; printk("\n============================================\n"); printk( "[ BUG: circular locking deadlock detected! ]\n"); @@ -180,7 +127,6 @@ void debug_rt_mutex_print_deadlock(struc printk("[ turning off deadlock detection." "Please report this trace. ]\n\n"); - local_irq_disable(); } void debug_rt_mutex_lock(struct rt_mutex *lock) @@ -189,7 +135,8 @@ void debug_rt_mutex_lock(struct rt_mutex void debug_rt_mutex_unlock(struct rt_mutex *lock) { - TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current); + if (debug_locks) + DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current); } void @@ -199,7 +146,7 @@ debug_rt_mutex_proxy_lock(struct rt_mute void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) { - TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock)); + DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock)); } void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) @@ -213,9 +160,9 @@ void debug_rt_mutex_init_waiter(struct r void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) { put_pid(waiter->deadlock_task_pid); - TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry)); - TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); - TRACE_WARN_ON(waiter->task); + DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry)); + DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); + DEBUG_LOCKS_WARN_ON(waiter->task); memset(waiter, 0x22, sizeof(*waiter)); } @@ -231,9 +178,36 @@ void debug_rt_mutex_init(struct rt_mutex void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) { +#ifdef CONFIG_DEBUG_PREEMPT + if (task->lock_count >= MAX_LOCK_STACK) { + if (!debug_locks_off()) + return; + printk("BUG: %s/%d: lock count overflow!\n", + task->comm, task->pid); + dump_stack(); + return; + } +#ifdef CONFIG_PREEMPT_RT + task->owned_lock[task->lock_count] = lock; +#endif + task->lock_count++; +#endif } void rt_mutex_deadlock_account_unlock(struct task_struct *task) { +#ifdef CONFIG_DEBUG_PREEMPT + if (!task->lock_count) { + if (!debug_locks_off()) + return; + printk("BUG: %s/%d: lock count underflow!\n", + task->comm, task->pid); + dump_stack(); + return; + } + task->lock_count--; +#ifdef CONFIG_PREEMPT_RT + task->owned_lock[task->lock_count] = NULL; +#endif +#endif } - Index: linux-2.6-tip/kernel/rtmutex.c =================================================================== --- linux-2.6-tip.orig/kernel/rtmutex.c +++ linux-2.6-tip/kernel/rtmutex.c @@ -14,6 +14,7 @@ 
#include #include #include +#include #include "rtmutex_common.h" @@ -97,6 +98,22 @@ static inline void mark_rt_mutex_waiters } #endif +int pi_initialized; + +/* + * we initialize the wait_list runtime. (Could be done build-time and/or + * boot-time.) + */ +static inline void init_lists(struct rt_mutex *lock) +{ + if (unlikely(!lock->wait_list.prio_list.prev)) { + plist_head_init(&lock->wait_list, &lock->wait_lock); +#ifdef CONFIG_DEBUG_RT_MUTEXES + pi_initialized++; +#endif + } +} + /* * Calculate task priority from the waiter list priority * @@ -253,13 +270,13 @@ static int rt_mutex_adjust_prio_chain(st plist_add(&waiter->list_entry, &lock->wait_list); /* Release the task */ - spin_unlock_irqrestore(&task->pi_lock, flags); + spin_unlock(&task->pi_lock); put_task_struct(task); /* Grab the next task */ task = rt_mutex_owner(lock); get_task_struct(task); - spin_lock_irqsave(&task->pi_lock, flags); + spin_lock(&task->pi_lock); if (waiter == rt_mutex_top_waiter(lock)) { /* Boost the owner */ @@ -277,10 +294,10 @@ static int rt_mutex_adjust_prio_chain(st __rt_mutex_adjust_prio(task); } - spin_unlock_irqrestore(&task->pi_lock, flags); + spin_unlock(&task->pi_lock); top_waiter = rt_mutex_top_waiter(lock); - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); if (!detect_deadlock && waiter != top_waiter) goto out_put_task; @@ -304,7 +321,6 @@ static inline int try_to_steal_lock(stru { struct task_struct *pendowner = rt_mutex_owner(lock); struct rt_mutex_waiter *next; - unsigned long flags; if (!rt_mutex_owner_pending(lock)) return 0; @@ -312,9 +328,9 @@ static inline int try_to_steal_lock(stru if (pendowner == current) return 1; - spin_lock_irqsave(&pendowner->pi_lock, flags); + spin_lock(&pendowner->pi_lock); if (current->prio >= pendowner->prio) { - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + spin_unlock(&pendowner->pi_lock); return 0; } @@ -324,7 +340,7 @@ static inline int try_to_steal_lock(stru * priority. 
*/ if (likely(!rt_mutex_has_waiters(lock))) { - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + spin_unlock(&pendowner->pi_lock); return 1; } @@ -332,7 +348,7 @@ static inline int try_to_steal_lock(stru next = rt_mutex_top_waiter(lock); plist_del(&next->pi_list_entry, &pendowner->pi_waiters); __rt_mutex_adjust_prio(pendowner); - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + spin_unlock(&pendowner->pi_lock); /* * We are going to steal the lock and a waiter was @@ -349,10 +365,10 @@ static inline int try_to_steal_lock(stru * might be current: */ if (likely(next->task != current)) { - spin_lock_irqsave(¤t->pi_lock, flags); + spin_lock(¤t->pi_lock); plist_add(&next->pi_list_entry, ¤t->pi_waiters); __rt_mutex_adjust_prio(current); - spin_unlock_irqrestore(¤t->pi_lock, flags); + spin_unlock(¤t->pi_lock); } return 1; } @@ -411,14 +427,13 @@ static int try_to_take_rt_mutex(struct r */ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, - int detect_deadlock) + int detect_deadlock, unsigned long flags) { struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; - unsigned long flags; int chain_walk = 0, res; - spin_lock_irqsave(¤t->pi_lock, flags); + spin_lock(¤t->pi_lock); __rt_mutex_adjust_prio(current); waiter->task = current; waiter->lock = lock; @@ -432,17 +447,17 @@ static int task_blocks_on_rt_mutex(struc current->pi_blocked_on = waiter; - spin_unlock_irqrestore(¤t->pi_lock, flags); + spin_unlock(¤t->pi_lock); if (waiter == rt_mutex_top_waiter(lock)) { - spin_lock_irqsave(&owner->pi_lock, flags); + spin_lock(&owner->pi_lock); plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); if (owner->pi_blocked_on) chain_walk = 1; - spin_unlock_irqrestore(&owner->pi_lock, flags); + spin_unlock(&owner->pi_lock); } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) chain_walk = 1; @@ -457,12 +472,12 @@ static int task_blocks_on_rt_mutex(struc */ get_task_struct(owner); - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, current); - spin_lock(&lock->wait_lock); + spin_lock_irq(&lock->wait_lock); return res; } @@ -475,13 +490,12 @@ static int task_blocks_on_rt_mutex(struc * * Called with lock->wait_lock held. */ -static void wakeup_next_waiter(struct rt_mutex *lock) +static void wakeup_next_waiter(struct rt_mutex *lock, int savestate) { struct rt_mutex_waiter *waiter; struct task_struct *pendowner; - unsigned long flags; - spin_lock_irqsave(¤t->pi_lock, flags); + spin_lock(¤t->pi_lock); waiter = rt_mutex_top_waiter(lock); plist_del(&waiter->list_entry, &lock->wait_list); @@ -498,7 +512,7 @@ static void wakeup_next_waiter(struct rt rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING); - spin_unlock_irqrestore(¤t->pi_lock, flags); + spin_unlock(¤t->pi_lock); /* * Clear the pi_blocked_on variable and enqueue a possible @@ -507,7 +521,7 @@ static void wakeup_next_waiter(struct rt * waiter with higher priority than pending-owner->normal_prio * is blocked on the unboosted (pending) owner. 
*/ - spin_lock_irqsave(&pendowner->pi_lock, flags); + spin_lock(&pendowner->pi_lock); WARN_ON(!pendowner->pi_blocked_on); WARN_ON(pendowner->pi_blocked_on != waiter); @@ -521,9 +535,12 @@ static void wakeup_next_waiter(struct rt next = rt_mutex_top_waiter(lock); plist_add(&next->pi_list_entry, &pendowner->pi_waiters); } - spin_unlock_irqrestore(&pendowner->pi_lock, flags); + spin_unlock(&pendowner->pi_lock); - wake_up_process(pendowner); + if (savestate) + wake_up_process_mutex(pendowner); + else + wake_up_process(pendowner); } /* @@ -532,22 +549,22 @@ static void wakeup_next_waiter(struct rt * Must be called with lock->wait_lock held */ static void remove_waiter(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter) + struct rt_mutex_waiter *waiter, + unsigned long flags) { int first = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); - unsigned long flags; int chain_walk = 0; - spin_lock_irqsave(¤t->pi_lock, flags); + spin_lock(¤t->pi_lock); plist_del(&waiter->list_entry, &lock->wait_list); waiter->task = NULL; current->pi_blocked_on = NULL; - spin_unlock_irqrestore(¤t->pi_lock, flags); + spin_unlock(¤t->pi_lock); if (first && owner != current) { - spin_lock_irqsave(&owner->pi_lock, flags); + spin_lock(&owner->pi_lock); plist_del(&waiter->pi_list_entry, &owner->pi_waiters); @@ -562,7 +579,7 @@ static void remove_waiter(struct rt_mute if (owner->pi_blocked_on) chain_walk = 1; - spin_unlock_irqrestore(&owner->pi_lock, flags); + spin_unlock(&owner->pi_lock); } WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); @@ -573,11 +590,11 @@ static void remove_waiter(struct rt_mute /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(owner); - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); - spin_lock(&lock->wait_lock); + spin_lock_irq(&lock->wait_lock); } /* @@ -598,14 +615,302 @@ void rt_mutex_adjust_pi(struct task_stru return; } - spin_unlock_irqrestore(&task->pi_lock, flags); - /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(task); + spin_unlock_irqrestore(&task->pi_lock, flags); + rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); } /* + * preemptible spin_lock functions: + */ + +#ifdef CONFIG_PREEMPT_RT + +static inline void +rt_spin_lock_fastlock(struct rt_mutex *lock, + void (*slowfn)(struct rt_mutex *lock)) +{ + might_sleep(); + + if (likely(rt_mutex_cmpxchg(lock, NULL, current))) + rt_mutex_deadlock_account_lock(lock, current); + else + slowfn(lock); +} + +static inline void +rt_spin_lock_fastunlock(struct rt_mutex *lock, + void (*slowfn)(struct rt_mutex *lock)) +{ + if (likely(rt_mutex_cmpxchg(lock, current, NULL))) + rt_mutex_deadlock_account_unlock(current); + else + slowfn(lock); +} + +/* + * Slow path lock function spin_lock style: this variant is very + * careful not to miss any non-lock wakeups. + * + * The wakeup side uses wake_up_process_mutex, which, combined with + * the xchg code of this function is a transparent sleep/wakeup + * mechanism nested within any existing sleep/wakeup mechanism. This + * enables the seemless use of arbitrary (blocking) spinlocks within + * sleep/wakeup event loops. 
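
The task-state juggling this comment refers to (the xchg() sequence in rt_spin_lock_slowlock() below) reduces to a small model: remember the state the task entered with, spin in UNINTERRUPTIBLE, and let a genuine wakeup -- observed as TASK_RUNNING -- override the remembered state on exit. Purely an illustrative sketch; the DEMO_* constants and the callbacks are stand-ins, not kernel interfaces.

#include <stdatomic.h>

enum { DEMO_RUNNING, DEMO_INTERRUPTIBLE, DEMO_UNINTERRUPTIBLE };

static _Atomic int demo_task_state;   /* stands in for current->state */

/* Model of the saved_state handling in the slow path below (illustrative). */
static void demo_slowlock(int (*try_take)(void), void (*block)(void))
{
	int saved_state = atomic_exchange(&demo_task_state, DEMO_UNINTERRUPTIBLE);

	while (!try_take()) {
		block();  /* "schedule()": a real wakeup sets demo_task_state to RUNNING */

		/* Go back to the sleep state, but remember any real wakeup we saw. */
		if (atomic_exchange(&demo_task_state, DEMO_UNINTERRUPTIBLE) == DEMO_RUNNING)
			saved_state = DEMO_RUNNING;
	}

	/* Restore the entry state unless a real wakeup already made us RUNNING. */
	if (atomic_exchange(&demo_task_state, saved_state) == DEMO_RUNNING)
		atomic_store(&demo_task_state, DEMO_RUNNING);
}
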
+ */ +static void noinline __sched +rt_spin_lock_slowlock(struct rt_mutex *lock) +{ + struct rt_mutex_waiter waiter; + unsigned long saved_state, state, flags; + + debug_rt_mutex_init_waiter(&waiter); + waiter.task = NULL; + + spin_lock_irqsave(&lock->wait_lock, flags); + init_lists(lock); + + /* Try to acquire the lock again: */ + if (try_to_take_rt_mutex(lock)) { + spin_unlock_irqrestore(&lock->wait_lock, flags); + return; + } + + BUG_ON(rt_mutex_owner(lock) == current); + + /* + * Here we save whatever state the task was in originally, + * we'll restore it at the end of the function and we'll take + * any intermediate wakeup into account as well, independently + * of the lock sleep/wakeup mechanism. When we get a real + * wakeup the task->state is TASK_RUNNING and we change + * saved_state accordingly. If we did not get a real wakeup + * then we return with the saved state. + */ + saved_state = xchg(¤t->state, TASK_UNINTERRUPTIBLE); + + for (;;) { + unsigned long saved_flags; + int saved_lock_depth = current->lock_depth; + + /* Try to acquire the lock */ + if (try_to_take_rt_mutex(lock)) + break; + /* + * waiter.task is NULL the first time we come here and + * when we have been woken up by the previous owner + * but the lock got stolen by an higher prio task. + */ + if (!waiter.task) { + task_blocks_on_rt_mutex(lock, &waiter, 0, flags); + /* Wakeup during boost ? */ + if (unlikely(!waiter.task)) + continue; + } + + /* + * Prevent schedule() to drop BKL, while waiting for + * the lock ! We restore lock_depth when we come back. + */ + saved_flags = current->flags & PF_NOSCHED; + current->lock_depth = -1; + current->flags &= ~PF_NOSCHED; + spin_unlock_irqrestore(&lock->wait_lock, flags); + + debug_rt_mutex_print_deadlock(&waiter); + + schedule_rt_mutex(lock); + + spin_lock_irqsave(&lock->wait_lock, flags); + current->flags |= saved_flags; + current->lock_depth = saved_lock_depth; + state = xchg(¤t->state, TASK_UNINTERRUPTIBLE); + if (unlikely(state == TASK_RUNNING)) + saved_state = TASK_RUNNING; + } + + state = xchg(¤t->state, saved_state); + if (unlikely(state == TASK_RUNNING)) + current->state = TASK_RUNNING; + + /* + * Extremely rare case, if we got woken up by a non-mutex wakeup, + * and we managed to steal the lock despite us not being the + * highest-prio waiter (due to SCHED_OTHER changing prio), then we + * can end up with a non-NULL waiter.task: + */ + if (unlikely(waiter.task)) + remove_waiter(lock, &waiter, flags); + /* + * try_to_take_rt_mutex() sets the waiter bit + * unconditionally. 
We might have to fix that up: + */ + fixup_rt_mutex_waiters(lock); + + spin_unlock_irqrestore(&lock->wait_lock, flags); + + debug_rt_mutex_free_waiter(&waiter); +} + +/* + * Slow path to release a rt_mutex spin_lock style + */ +static void noinline __sched +rt_spin_lock_slowunlock(struct rt_mutex *lock) +{ + unsigned long flags; + + spin_lock_irqsave(&lock->wait_lock, flags); + + debug_rt_mutex_unlock(lock); + + rt_mutex_deadlock_account_unlock(current); + + if (!rt_mutex_has_waiters(lock)) { + lock->owner = NULL; + spin_unlock_irqrestore(&lock->wait_lock, flags); + return; + } + + wakeup_next_waiter(lock, 1); + + spin_unlock_irqrestore(&lock->wait_lock, flags); + + /* Undo pi boosting.when necessary */ + rt_mutex_adjust_prio(current); +} + +void __lockfunc rt_spin_lock(spinlock_t *lock) +{ + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); +} +EXPORT_SYMBOL(rt_spin_lock); + +void __lockfunc __rt_spin_lock(struct rt_mutex *lock) +{ + rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock); +} +EXPORT_SYMBOL(__rt_spin_lock); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) +{ + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); + spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); +} +EXPORT_SYMBOL(rt_spin_lock_nested); + +#endif + +void __lockfunc rt_spin_unlock(spinlock_t *lock) +{ + /* NOTE: we always pass in '1' for nested, for simplicity */ + spin_release(&lock->dep_map, 1, _RET_IP_); + rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); +} +EXPORT_SYMBOL(rt_spin_unlock); + +void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) +{ + rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); +} +EXPORT_SYMBOL(__rt_spin_unlock); + +/* + * Wait for the lock to get unlocked: instead of polling for an unlock + * (like raw spinlocks do), we lock and unlock, to force the kernel to + * schedule if there's contention: + */ +void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) +{ + spin_lock(lock); + spin_unlock(lock); +} +EXPORT_SYMBOL(rt_spin_unlock_wait); + +int __lockfunc rt_spin_trylock(spinlock_t *lock) +{ + int ret = rt_mutex_trylock(&lock->lock); + + if (ret) + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock); + +int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) +{ + int ret; + + *flags = 0; + ret = rt_mutex_trylock(&lock->lock); + if (ret) + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock_irqsave); + +int _atomic_dec_and_spin_lock(spinlock_t *lock, atomic_t *atomic) +{ + /* Subtract 1 from counter unless that drops it to 0 (ie. 
it was 1) */ + if (atomic_add_unless(atomic, -1, 1)) + return 0; + rt_spin_lock(lock); + if (atomic_dec_and_test(atomic)) + return 1; + rt_spin_unlock(lock); + return 0; +} +EXPORT_SYMBOL(_atomic_dec_and_spin_lock); + +void +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key, 0); +#endif + __rt_mutex_init(&lock->lock, name); +} +EXPORT_SYMBOL(__rt_spin_lock_init); + +#endif + +static inline int rt_release_bkl(struct rt_mutex *lock, unsigned long flags) +{ + int saved_lock_depth = current->lock_depth; + + current->lock_depth = -1; + /* + * try_to_take_lock set the waiters, make sure it's + * still correct. + */ + fixup_rt_mutex_waiters(lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); + + up(&kernel_sem); + + spin_lock_irq(&lock->wait_lock); + + return saved_lock_depth; +} + +static inline void rt_reacquire_bkl(int saved_lock_depth) +{ + down(&kernel_sem); + current->lock_depth = saved_lock_depth; +} + +/* * Slow path lock function: */ static int __sched @@ -613,20 +918,29 @@ rt_mutex_slowlock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout, int detect_deadlock) { + int ret = 0, saved_lock_depth = -1; struct rt_mutex_waiter waiter; - int ret = 0; + unsigned long flags; debug_rt_mutex_init_waiter(&waiter); waiter.task = NULL; - spin_lock(&lock->wait_lock); + spin_lock_irqsave(&lock->wait_lock, flags); + init_lists(lock); /* Try to acquire the lock again: */ if (try_to_take_rt_mutex(lock)) { - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); return 0; } + /* + * We drop the BKL here before we go into the wait loop to avoid a + * possible deadlock in the scheduler. + */ + if (unlikely(current->lock_depth >= 0)) + saved_lock_depth = rt_release_bkl(lock, flags); + set_current_state(state); /* Setup the timer, when timeout != NULL */ @@ -637,6 +951,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, } for (;;) { + unsigned long saved_flags; + /* Try to acquire the lock: */ if (try_to_take_rt_mutex(lock)) break; @@ -662,7 +978,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, */ if (!waiter.task) { ret = task_blocks_on_rt_mutex(lock, &waiter, - detect_deadlock); + detect_deadlock, flags); /* * If we got woken up by the owner then start loop * all over without going into schedule to try @@ -681,22 +997,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, if (unlikely(ret)) break; } + saved_flags = current->flags & PF_NOSCHED; + current->flags &= ~PF_NOSCHED; - spin_unlock(&lock->wait_lock); + spin_unlock_irq(&lock->wait_lock); debug_rt_mutex_print_deadlock(&waiter); if (waiter.task) schedule_rt_mutex(lock); - spin_lock(&lock->wait_lock); + spin_lock_irq(&lock->wait_lock); + + current->flags |= saved_flags; set_current_state(state); } set_current_state(TASK_RUNNING); if (unlikely(waiter.task)) - remove_waiter(lock, &waiter); + remove_waiter(lock, &waiter, flags); /* * try_to_take_rt_mutex() sets the waiter bit @@ -704,7 +1024,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, */ fixup_rt_mutex_waiters(lock); - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); /* Remove pending timer: */ if (unlikely(timeout)) @@ -718,6 +1038,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, if (unlikely(ret)) rt_mutex_adjust_prio(current); + /* Must we reaquire the BKL? 
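
The _atomic_dec_and_spin_lock() helper above follows the classic atomic_dec_and_lock() pattern: drop a reference without the lock unless it might be the last one, and only take the lock for the final 1->0 drop. A self-contained userspace rendering (the demo_* names are invented) looks roughly like this:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* add_unless(v, a, u): add a to *v unless *v == u; true if the add happened. */
static bool demo_add_unless(_Atomic int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u) {
		/* on failure, c is reloaded with the current value */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	}
	return false;
}

/* Returns true, with the mutex held, only for the final 1 -> 0 reference drop. */
static bool demo_dec_and_lock(_Atomic int *refcount, pthread_mutex_t *lock)
{
	if (demo_add_unless(refcount, -1, 1))    /* fast path: not the last reference */
		return false;

	pthread_mutex_lock(lock);                /* slow path: might be the last one  */
	if (atomic_fetch_sub(refcount, 1) == 1)  /* it really was the last reference  */
		return true;                     /* caller tears down, then unlocks   */
	pthread_mutex_unlock(lock);
	return false;
}
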
*/ + if (unlikely(saved_lock_depth >= 0)) + rt_reacquire_bkl(saved_lock_depth); + debug_rt_mutex_free_waiter(&waiter); return ret; @@ -729,12 +1053,15 @@ rt_mutex_slowlock(struct rt_mutex *lock, static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) { + unsigned long flags; int ret = 0; - spin_lock(&lock->wait_lock); + spin_lock_irqsave(&lock->wait_lock, flags); if (likely(rt_mutex_owner(lock) != current)) { + init_lists(lock); + ret = try_to_take_rt_mutex(lock); /* * try_to_take_rt_mutex() sets the lock waiters @@ -743,7 +1070,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo fixup_rt_mutex_waiters(lock); } - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); return ret; } @@ -754,7 +1081,9 @@ rt_mutex_slowtrylock(struct rt_mutex *lo static void __sched rt_mutex_slowunlock(struct rt_mutex *lock) { - spin_lock(&lock->wait_lock); + unsigned long flags; + + spin_lock_irqsave(&lock->wait_lock, flags); debug_rt_mutex_unlock(lock); @@ -762,13 +1091,13 @@ rt_mutex_slowunlock(struct rt_mutex *loc if (!rt_mutex_has_waiters(lock)) { lock->owner = NULL; - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); return; } - wakeup_next_waiter(lock); + wakeup_next_waiter(lock, 0); - spin_unlock(&lock->wait_lock); + spin_unlock_irqrestore(&lock->wait_lock, flags); /* Undo pi boosting if necessary: */ rt_mutex_adjust_prio(current); @@ -830,6 +1159,27 @@ rt_mutex_fastunlock(struct rt_mutex *loc } /** + * rt_mutex_lock_killable - lock a rt_mutex killable + * + * @lock: the rt_mutex to be locked + * @detect_deadlock: deadlock detection on/off + * + * Returns: + * 0 on success + * -EINTR when interrupted by a signal + * -EDEADLK when the lock would deadlock (when deadlock detection is on) + */ +int __sched rt_mutex_lock_killable(struct rt_mutex *lock, + int detect_deadlock) +{ + might_sleep(); + + return rt_mutex_fastlock(lock, TASK_KILLABLE, + detect_deadlock, rt_mutex_slowlock); +} +EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); + +/** * rt_mutex_lock - lock a rt_mutex * * @lock: the rt_mutex to be locked Index: linux-2.6-tip/kernel/rwsem.c =================================================================== --- linux-2.6-tip.orig/kernel/rwsem.c +++ linux-2.6-tip/kernel/rwsem.c @@ -16,7 +16,7 @@ /* * lock for reading */ -void __sched down_read(struct rw_semaphore *sem) +void __sched compat_down_read(struct compat_rw_semaphore *sem) { might_sleep(); rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); @@ -24,12 +24,12 @@ void __sched down_read(struct rw_semapho LOCK_CONTENDED(sem, __down_read_trylock, __down_read); } -EXPORT_SYMBOL(down_read); +EXPORT_SYMBOL(compat_down_read); /* * trylock for reading -- returns 1 if successful, 0 if contention */ -int down_read_trylock(struct rw_semaphore *sem) +int compat_down_read_trylock(struct compat_rw_semaphore *sem) { int ret = __down_read_trylock(sem); @@ -38,12 +38,12 @@ int down_read_trylock(struct rw_semaphor return ret; } -EXPORT_SYMBOL(down_read_trylock); +EXPORT_SYMBOL(compat_down_read_trylock); /* * lock for writing */ -void __sched down_write(struct rw_semaphore *sem) +void __sched compat_down_write(struct compat_rw_semaphore *sem) { might_sleep(); rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); @@ -51,12 +51,12 @@ void __sched down_write(struct rw_semaph LOCK_CONTENDED(sem, __down_write_trylock, __down_write); } -EXPORT_SYMBOL(down_write); +EXPORT_SYMBOL(compat_down_write); /* * trylock for writing -- returns 1 if successful, 0 if contention */ -int down_write_trylock(struct rw_semaphore *sem) +int 
compat_down_write_trylock(struct compat_rw_semaphore *sem) { int ret = __down_write_trylock(sem); @@ -65,36 +65,36 @@ int down_write_trylock(struct rw_semapho return ret; } -EXPORT_SYMBOL(down_write_trylock); +EXPORT_SYMBOL(compat_down_write_trylock); /* * release a read lock */ -void up_read(struct rw_semaphore *sem) +void compat_up_read(struct compat_rw_semaphore *sem) { rwsem_release(&sem->dep_map, 1, _RET_IP_); __up_read(sem); } -EXPORT_SYMBOL(up_read); +EXPORT_SYMBOL(compat_up_read); /* * release a write lock */ -void up_write(struct rw_semaphore *sem) +void compat_up_write(struct compat_rw_semaphore *sem) { rwsem_release(&sem->dep_map, 1, _RET_IP_); __up_write(sem); } -EXPORT_SYMBOL(up_write); +EXPORT_SYMBOL(compat_up_write); /* * downgrade write lock to read lock */ -void downgrade_write(struct rw_semaphore *sem) +void compat_downgrade_write(struct compat_rw_semaphore *sem) { /* * lockdep: a downgraded write will live on as a write @@ -103,11 +103,11 @@ void downgrade_write(struct rw_semaphore __downgrade_write(sem); } -EXPORT_SYMBOL(downgrade_write); +EXPORT_SYMBOL(compat_downgrade_write); #ifdef CONFIG_DEBUG_LOCK_ALLOC -void down_read_nested(struct rw_semaphore *sem, int subclass) +void compat_down_read_nested(struct compat_rw_semaphore *sem, int subclass) { might_sleep(); rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); @@ -115,18 +115,18 @@ void down_read_nested(struct rw_semaphor LOCK_CONTENDED(sem, __down_read_trylock, __down_read); } -EXPORT_SYMBOL(down_read_nested); +EXPORT_SYMBOL(compat_down_read_nested); -void down_read_non_owner(struct rw_semaphore *sem) +void compat_down_read_non_owner(struct compat_rw_semaphore *sem) { might_sleep(); __down_read(sem); } -EXPORT_SYMBOL(down_read_non_owner); +EXPORT_SYMBOL(compat_down_read_non_owner); -void down_write_nested(struct rw_semaphore *sem, int subclass) +void compat_down_write_nested(struct compat_rw_semaphore *sem, int subclass) { might_sleep(); rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_); @@ -134,14 +134,14 @@ void down_write_nested(struct rw_semapho LOCK_CONTENDED(sem, __down_write_trylock, __down_write); } -EXPORT_SYMBOL(down_write_nested); +EXPORT_SYMBOL(compat_down_write_nested); -void up_read_non_owner(struct rw_semaphore *sem) +void compat_up_read_non_owner(struct compat_rw_semaphore *sem) { __up_read(sem); } -EXPORT_SYMBOL(up_read_non_owner); +EXPORT_SYMBOL(compat_up_read_non_owner); #endif Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -2394,7 +2394,8 @@ void task_oncpu_function_call(struct tas * * returns failure only if the task is already active. 
*/ -static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) +static int +try_to_wake_up(struct task_struct *p, unsigned int state, int sync, int mutex) { int cpu, orig_cpu, this_cpu, success = 0; unsigned long flags; @@ -2516,13 +2517,31 @@ out: int wake_up_process(struct task_struct *p) { - return try_to_wake_up(p, TASK_ALL, 0); + return try_to_wake_up(p, TASK_ALL, 0, 0); } EXPORT_SYMBOL(wake_up_process); +int wake_up_process_sync(struct task_struct * p) +{ + return try_to_wake_up(p, TASK_ALL, 1, 0); +} +EXPORT_SYMBOL(wake_up_process_sync); + +int wake_up_process_mutex(struct task_struct * p) +{ + return try_to_wake_up(p, TASK_ALL, 0, 1); +} +EXPORT_SYMBOL(wake_up_process_mutex); + +int wake_up_process_mutex_sync(struct task_struct * p) +{ + return try_to_wake_up(p, TASK_ALL, 1, 1); +} +EXPORT_SYMBOL(wake_up_process_mutex_sync); + int wake_up_state(struct task_struct *p, unsigned int state) { - return try_to_wake_up(p, state, 0); + return try_to_wake_up(p, state | TASK_RUNNING_MUTEX, 0, 0); } /* @@ -5017,7 +5036,8 @@ asmlinkage void __sched preempt_schedule int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key) { - return try_to_wake_up(curr->private, mode, sync); + return try_to_wake_up(curr->private, mode | TASK_RUNNING_MUTEX, + sync, 0); } EXPORT_SYMBOL(default_wake_function); @@ -5057,7 +5077,7 @@ void __wake_up(wait_queue_head_t *q, uns unsigned long flags; spin_lock_irqsave(&q->lock, flags); - __wake_up_common(q, mode, nr_exclusive, 0, key); + __wake_up_common(q, mode, nr_exclusive, 1, key); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL(__wake_up); @@ -5116,7 +5136,7 @@ void complete(struct completion *x) spin_lock_irqsave(&x->wait.lock, flags); x->done++; - __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); + __wake_up_common(&x->wait, TASK_NORMAL, 1, 1, NULL); spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete); @@ -5133,11 +5153,17 @@ void complete_all(struct completion *x) spin_lock_irqsave(&x->wait.lock, flags); x->done += UINT_MAX/2; - __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); + __wake_up_common(&x->wait, TASK_NORMAL, 0, 1, NULL); spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete_all); +unsigned int completion_done(struct completion *x) +{ + return x->done; +} +EXPORT_SYMBOL(completion_done); + static inline long __sched do_wait_for_common(struct completion *x, long timeout, int state) { @@ -6030,10 +6056,7 @@ SYSCALL_DEFINE0(sched_yield) * Since we are going to call schedule() anyway, there's * no need to preempt or enable interrupts: */ - __release(rq->lock); - spin_release(&rq->lock.dep_map, 1, _THIS_IP_); - _raw_spin_unlock(&rq->lock); - preempt_enable_no_resched(); + spin_unlock_no_resched(&rq->lock); schedule(); @@ -6076,13 +6099,13 @@ EXPORT_SYMBOL(_cond_resched); * operations here to prevent schedule() from being called twice (once via * spin_unlock(), once by hand). 
*/ -int cond_resched_lock(spinlock_t *lock) +int __cond_resched_raw_spinlock(raw_spinlock_t *lock) { int resched = need_resched() && system_state == SYSTEM_RUNNING; int ret = 0; if (spin_needbreak(lock) || resched) { - spin_unlock(lock); + spin_unlock_no_resched(lock); if (resched && need_resched()) __cond_resched(); else @@ -6092,15 +6115,16 @@ int cond_resched_lock(spinlock_t *lock) } return ret; } -EXPORT_SYMBOL(cond_resched_lock); +EXPORT_SYMBOL(__cond_resched_raw_spinlock); /* * Voluntarily preempt a process context that has softirqs disabled: */ int __sched cond_resched_softirq(void) { +#ifndef CONFIG_PREEMPT_RT WARN_ON_ONCE(!in_softirq()); - +#endif if (need_resched() && system_state == SYSTEM_RUNNING) { local_bh_enable(); __cond_resched(); @@ -6309,19 +6333,23 @@ void sched_show_task(struct task_struct unsigned state; state = p->state ? __ffs(p->state) + 1 : 0; - printk(KERN_INFO "%-13.13s %c", p->comm, - state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); + printk("%-13.13s %c [%p]", p->comm, + state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?', p); #if BITS_PER_LONG == 32 - if (state == TASK_RUNNING) + if (0 && (state == TASK_RUNNING)) printk(KERN_CONT " running "); else printk(KERN_CONT " %08lx ", thread_saved_pc(p)); #else - if (state == TASK_RUNNING) + if (0 && (state == TASK_RUNNING)) printk(KERN_CONT " running task "); else printk(KERN_CONT " %016lx ", thread_saved_pc(p)); #endif + if (task_curr(p)) + printk("[curr] "); + else if (p->se.on_rq) + printk("[on rq #%d] ", task_cpu(p)); #ifdef CONFIG_DEBUG_STACK_USAGE free = stack_not_used(p); #endif Index: linux-2.6-tip/kernel/sched_clock.c =================================================================== --- linux-2.6-tip.orig/kernel/sched_clock.c +++ linux-2.6-tip/kernel/sched_clock.c @@ -52,7 +52,7 @@ struct sched_clock_data { * from within instrumentation code so we dont want to do any * instrumentation ourselves. */ - raw_spinlock_t lock; + __raw_spinlock_t lock; u64 tick_raw; u64 tick_gtod; @@ -79,7 +79,7 @@ void sched_clock_init(void) for_each_possible_cpu(cpu) { struct sched_clock_data *scd = cpu_sdc(cpu); - scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + scd->lock = (__raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; scd->tick_raw = 0; scd->tick_gtod = ktime_now; scd->clock = ktime_now; Index: linux-2.6-tip/kernel/semaphore.c =================================================================== --- linux-2.6-tip.orig/kernel/semaphore.c +++ linux-2.6-tip/kernel/semaphore.c @@ -33,11 +33,11 @@ #include #include -static noinline void __down(struct semaphore *sem); -static noinline int __down_interruptible(struct semaphore *sem); -static noinline int __down_killable(struct semaphore *sem); -static noinline int __down_timeout(struct semaphore *sem, long jiffies); -static noinline void __up(struct semaphore *sem); +static noinline void __down(struct compat_semaphore *sem); +static noinline int __down_interruptible(struct compat_semaphore *sem); +static noinline int __down_killable(struct compat_semaphore *sem); +static noinline int __down_timeout(struct compat_semaphore *sem, long jiffies); +static noinline void __up(struct compat_semaphore *sem); /** * down - acquire the semaphore @@ -50,7 +50,7 @@ static noinline void __up(struct semapho * Use of this function is deprecated, please use down_interruptible() or * down_killable() instead. 
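
The type switches above (raw_spinlock_t becoming __raw_spinlock_t in lockdep and sched_clock) mark the locks that must keep genuinely spinning once ordinary spinlock_t is backed by an rt_mutex and may sleep. As a loose userspace analogy only -- the demo_* names are invented and this is not how the kernel types are defined -- the distinction is:

#include <pthread.h>
#include <stdatomic.h>

/* "Raw" lock: busy-waits, never blocks; the analogue of __raw_spinlock_t users
 * such as lockdep_lock or sched_clock_data.lock. Init: { ATOMIC_FLAG_INIT }.  */
typedef struct { atomic_flag taken; } demo_raw_lock;

static void demo_raw_acquire(demo_raw_lock *l)
{
	while (atomic_flag_test_and_set_explicit(&l->taken, memory_order_acquire))
		;  /* spin */
}

static void demo_raw_release(demo_raw_lock *l)
{
	atomic_flag_clear_explicit(&l->taken, memory_order_release);
}

/* "Sleeping" lock: contention blocks the caller -- the analogue of spinlock_t
 * once the series maps it onto an rt_mutex (rt_spin_lock() and friends).     */
typedef pthread_mutex_t demo_sleeping_lock;
#define demo_sleeping_acquire(l) pthread_mutex_lock(l)
#define demo_sleeping_release(l) pthread_mutex_unlock(l)
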
*/ -void down(struct semaphore *sem) +void compat_down(struct compat_semaphore *sem) { unsigned long flags; @@ -61,7 +61,7 @@ void down(struct semaphore *sem) __down(sem); spin_unlock_irqrestore(&sem->lock, flags); } -EXPORT_SYMBOL(down); +EXPORT_SYMBOL(compat_down); /** * down_interruptible - acquire the semaphore unless interrupted @@ -72,7 +72,7 @@ EXPORT_SYMBOL(down); * If the sleep is interrupted by a signal, this function will return -EINTR. * If the semaphore is successfully acquired, this function returns 0. */ -int down_interruptible(struct semaphore *sem) +int compat_down_interruptible(struct compat_semaphore *sem) { unsigned long flags; int result = 0; @@ -86,7 +86,7 @@ int down_interruptible(struct semaphore return result; } -EXPORT_SYMBOL(down_interruptible); +EXPORT_SYMBOL(compat_down_interruptible); /** * down_killable - acquire the semaphore unless killed @@ -98,7 +98,7 @@ EXPORT_SYMBOL(down_interruptible); * -EINTR. If the semaphore is successfully acquired, this function returns * 0. */ -int down_killable(struct semaphore *sem) +int compat_down_killable(struct compat_semaphore *sem) { unsigned long flags; int result = 0; @@ -112,7 +112,7 @@ int down_killable(struct semaphore *sem) return result; } -EXPORT_SYMBOL(down_killable); +EXPORT_SYMBOL(compat_down_killable); /** * down_trylock - try to acquire the semaphore, without waiting @@ -127,7 +127,7 @@ EXPORT_SYMBOL(down_killable); * Unlike mutex_trylock, this function can be used from interrupt context, * and the semaphore can be released by any task or interrupt. */ -int down_trylock(struct semaphore *sem) +int compat_down_trylock(struct compat_semaphore *sem) { unsigned long flags; int count; @@ -140,7 +140,7 @@ int down_trylock(struct semaphore *sem) return (count < 0); } -EXPORT_SYMBOL(down_trylock); +EXPORT_SYMBOL(compat_down_trylock); /** * down_timeout - acquire the semaphore within a specified time @@ -152,7 +152,7 @@ EXPORT_SYMBOL(down_trylock); * If the semaphore is not released within the specified number of jiffies, * this function returns -ETIME. It returns 0 if the semaphore was acquired. */ -int down_timeout(struct semaphore *sem, long jiffies) +int compat_down_timeout(struct compat_semaphore *sem, long jiffies) { unsigned long flags; int result = 0; @@ -166,7 +166,7 @@ int down_timeout(struct semaphore *sem, return result; } -EXPORT_SYMBOL(down_timeout); +EXPORT_SYMBOL(compat_down_timeout); /** * up - release the semaphore @@ -175,7 +175,7 @@ EXPORT_SYMBOL(down_timeout); * Release the semaphore. Unlike mutexes, up() may be called from any * context and even by tasks which have never called down(). */ -void up(struct semaphore *sem) +void compat_up(struct compat_semaphore *sem) { unsigned long flags; @@ -186,7 +186,7 @@ void up(struct semaphore *sem) __up(sem); spin_unlock_irqrestore(&sem->lock, flags); } -EXPORT_SYMBOL(up); +EXPORT_SYMBOL(compat_up); /* Functions for the contended case */ @@ -201,7 +201,7 @@ struct semaphore_waiter { * constant, and thus optimised away by the compiler. Likewise the * 'timeout' parameter for the cases without timeouts. 
*/ -static inline int __sched __down_common(struct semaphore *sem, long state, +static inline int __sched __down_common(struct compat_semaphore *sem, long state, long timeout) { struct task_struct *task = current; @@ -233,27 +233,27 @@ static inline int __sched __down_common( return -EINTR; } -static noinline void __sched __down(struct semaphore *sem) +static noinline void __sched __down(struct compat_semaphore *sem) { __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } -static noinline int __sched __down_interruptible(struct semaphore *sem) +static noinline int __sched __down_interruptible(struct compat_semaphore *sem) { return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } -static noinline int __sched __down_killable(struct semaphore *sem) +static noinline int __sched __down_killable(struct compat_semaphore *sem) { return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT); } -static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies) +static noinline int __sched __down_timeout(struct compat_semaphore *sem, long jiffies) { return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies); } -static noinline void __sched __up(struct semaphore *sem) +static noinline void __sched __up(struct compat_semaphore *sem) { struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, struct semaphore_waiter, list); Index: linux-2.6-tip/kernel/spinlock.c =================================================================== --- linux-2.6-tip.orig/kernel/spinlock.c +++ linux-2.6-tip/kernel/spinlock.c @@ -21,7 +21,7 @@ #include #include -int __lockfunc _spin_trylock(spinlock_t *lock) +int __lockfunc __spin_trylock(raw_spinlock_t *lock) { preempt_disable(); if (_raw_spin_trylock(lock)) { @@ -32,9 +32,46 @@ int __lockfunc _spin_trylock(spinlock_t preempt_enable(); return 0; } -EXPORT_SYMBOL(_spin_trylock); +EXPORT_SYMBOL(__spin_trylock); -int __lockfunc _read_trylock(rwlock_t *lock) +int __lockfunc __spin_trylock_irq(raw_spinlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + + if (_raw_spin_trylock(lock)) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + + __preempt_enable_no_resched(); + local_irq_enable(); + preempt_check_resched(); + + return 0; +} +EXPORT_SYMBOL(__spin_trylock_irq); + +int __lockfunc __spin_trylock_irqsave(raw_spinlock_t *lock, + unsigned long *flags) +{ + local_irq_save(*flags); + preempt_disable(); + + if (_raw_spin_trylock(lock)) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + + __preempt_enable_no_resched(); + local_irq_restore(*flags); + preempt_check_resched(); + + return 0; +} +EXPORT_SYMBOL(__spin_trylock_irqsave); + +int __lockfunc __read_trylock(raw_rwlock_t *lock) { preempt_disable(); if (_raw_read_trylock(lock)) { @@ -45,9 +82,9 @@ int __lockfunc _read_trylock(rwlock_t *l preempt_enable(); return 0; } -EXPORT_SYMBOL(_read_trylock); +EXPORT_SYMBOL(__read_trylock); -int __lockfunc _write_trylock(rwlock_t *lock) +int __lockfunc __write_trylock(raw_rwlock_t *lock) { preempt_disable(); if (_raw_write_trylock(lock)) { @@ -58,7 +95,21 @@ int __lockfunc _write_trylock(rwlock_t * preempt_enable(); return 0; } -EXPORT_SYMBOL(_write_trylock); +EXPORT_SYMBOL(__write_trylock); + +int __lockfunc __write_trylock_irqsave(raw_rwlock_t *lock, unsigned long *flags) +{ + int ret; + + local_irq_save(*flags); + ret = __write_trylock(lock); + if (ret) + return ret; + + local_irq_restore(*flags); + return 0; +} +EXPORT_SYMBOL(__write_trylock_irqsave); /* * If lockdep is enabled then we use the 
non-preemption spin-ops @@ -67,15 +118,15 @@ EXPORT_SYMBOL(_write_trylock); */ #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) -void __lockfunc _read_lock(rwlock_t *lock) +void __lockfunc __read_lock(raw_rwlock_t *lock) { preempt_disable(); rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); } -EXPORT_SYMBOL(_read_lock); +EXPORT_SYMBOL(__read_lock); -unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) +unsigned long __lockfunc __spin_lock_irqsave(raw_spinlock_t *lock) { unsigned long flags; @@ -94,27 +145,27 @@ unsigned long __lockfunc _spin_lock_irqs #endif return flags; } -EXPORT_SYMBOL(_spin_lock_irqsave); +EXPORT_SYMBOL(__spin_lock_irqsave); -void __lockfunc _spin_lock_irq(spinlock_t *lock) +void __lockfunc __spin_lock_irq(raw_spinlock_t *lock) { local_irq_disable(); preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } -EXPORT_SYMBOL(_spin_lock_irq); +EXPORT_SYMBOL(__spin_lock_irq); -void __lockfunc _spin_lock_bh(spinlock_t *lock) +void __lockfunc __spin_lock_bh(raw_spinlock_t *lock) { local_bh_disable(); preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } -EXPORT_SYMBOL(_spin_lock_bh); +EXPORT_SYMBOL(__spin_lock_bh); -unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) +unsigned long __lockfunc __read_lock_irqsave(raw_rwlock_t *lock) { unsigned long flags; @@ -124,27 +175,27 @@ unsigned long __lockfunc _read_lock_irqs LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); return flags; } -EXPORT_SYMBOL(_read_lock_irqsave); +EXPORT_SYMBOL(__read_lock_irqsave); -void __lockfunc _read_lock_irq(rwlock_t *lock) +void __lockfunc __read_lock_irq(raw_rwlock_t *lock) { local_irq_disable(); preempt_disable(); rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); } -EXPORT_SYMBOL(_read_lock_irq); +EXPORT_SYMBOL(__read_lock_irq); -void __lockfunc _read_lock_bh(rwlock_t *lock) +void __lockfunc __read_lock_bh(raw_rwlock_t *lock) { local_bh_disable(); preempt_disable(); rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); } -EXPORT_SYMBOL(_read_lock_bh); +EXPORT_SYMBOL(__read_lock_bh); -unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) +unsigned long __lockfunc __write_lock_irqsave(raw_rwlock_t *lock) { unsigned long flags; @@ -154,43 +205,43 @@ unsigned long __lockfunc _write_lock_irq LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); return flags; } -EXPORT_SYMBOL(_write_lock_irqsave); +EXPORT_SYMBOL(__write_lock_irqsave); -void __lockfunc _write_lock_irq(rwlock_t *lock) +void __lockfunc __write_lock_irq(raw_rwlock_t *lock) { local_irq_disable(); preempt_disable(); rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); } -EXPORT_SYMBOL(_write_lock_irq); +EXPORT_SYMBOL(__write_lock_irq); -void __lockfunc _write_lock_bh(rwlock_t *lock) +void __lockfunc __write_lock_bh(raw_rwlock_t *lock) { local_bh_disable(); preempt_disable(); rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); } -EXPORT_SYMBOL(_write_lock_bh); +EXPORT_SYMBOL(__write_lock_bh); -void __lockfunc _spin_lock(spinlock_t *lock) +void __lockfunc __spin_lock(raw_spinlock_t *lock) { preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } -EXPORT_SYMBOL(_spin_lock); +EXPORT_SYMBOL(__spin_lock); -void __lockfunc _write_lock(rwlock_t *lock) +void __lockfunc __write_lock(raw_rwlock_t *lock) { preempt_disable(); rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); } -EXPORT_SYMBOL(_write_lock); +EXPORT_SYMBOL(__write_lock); #else /* CONFIG_PREEMPT: */ @@ -203,7 +254,7 @@ EXPORT_SYMBOL(_write_lock); */ #define BUILD_LOCK_OPS(op, locktype) \ -void __lockfunc _##op##_lock(locktype##_t *lock) \ +void __lockfunc __##op##_lock(locktype##_t *lock) \ { \ for (;;) { \ preempt_disable(); \ @@ -213,15 +264,16 @@ void __lockfunc _##op##_lock(locktype##_ \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ - while (!op##_can_lock(lock) && (lock)->break_lock) \ - _raw_##op##_relax(&lock->raw_lock); \ + while (!__raw_##op##_can_lock(&(lock)->raw_lock) && \ + (lock)->break_lock) \ + __raw_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ } \ \ -EXPORT_SYMBOL(_##op##_lock); \ +EXPORT_SYMBOL(__##op##_lock); \ \ -unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \ +unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ { \ unsigned long flags; \ \ @@ -235,23 +287,24 @@ unsigned long __lockfunc _##op##_lock_ir \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ - while (!op##_can_lock(lock) && (lock)->break_lock) \ - _raw_##op##_relax(&lock->raw_lock); \ + while (!__raw_##op##_can_lock(&(lock)->raw_lock) && \ + (lock)->break_lock) \ + __raw_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ return flags; \ } \ \ -EXPORT_SYMBOL(_##op##_lock_irqsave); \ +EXPORT_SYMBOL(__##op##_lock_irqsave); \ \ -void __lockfunc _##op##_lock_irq(locktype##_t *lock) \ +void __lockfunc __##op##_lock_irq(locktype##_t *lock) \ { \ - _##op##_lock_irqsave(lock); \ + __##op##_lock_irqsave(lock); \ } \ \ -EXPORT_SYMBOL(_##op##_lock_irq); \ +EXPORT_SYMBOL(__##op##_lock_irq); \ \ -void __lockfunc _##op##_lock_bh(locktype##_t *lock) \ +void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ { \ unsigned long flags; \ \ @@ -260,37 +313,46 @@ void __lockfunc _##op##_lock_bh(locktype /* irq-disabling. 
We use the generic preemption-aware */ \ /* function: */ \ /**/ \ - flags = _##op##_lock_irqsave(lock); \ + flags = __##op##_lock_irqsave(lock); \ local_bh_disable(); \ local_irq_restore(flags); \ } \ \ -EXPORT_SYMBOL(_##op##_lock_bh) +EXPORT_SYMBOL(__##op##_lock_bh) /* * Build preemption-friendly versions of the following * lock-spinning functions: * - * _[spin|read|write]_lock() - * _[spin|read|write]_lock_irq() - * _[spin|read|write]_lock_irqsave() - * _[spin|read|write]_lock_bh() + * __[spin|read|write]_lock() + * __[spin|read|write]_lock_irq() + * __[spin|read|write]_lock_irqsave() + * __[spin|read|write]_lock_bh() */ -BUILD_LOCK_OPS(spin, spinlock); -BUILD_LOCK_OPS(read, rwlock); -BUILD_LOCK_OPS(write, rwlock); +BUILD_LOCK_OPS(spin, raw_spinlock); +BUILD_LOCK_OPS(read, raw_rwlock); +BUILD_LOCK_OPS(write, raw_rwlock); #endif /* CONFIG_PREEMPT */ #ifdef CONFIG_DEBUG_LOCK_ALLOC -void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) +void __lockfunc __spin_lock_nested(raw_spinlock_t *lock, int subclass) { preempt_disable(); spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } -EXPORT_SYMBOL(_spin_lock_nested); +EXPORT_SYMBOL(__spin_lock_nested); + +void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, + struct lockdep_map *nest_lock) +{ + preempt_disable(); + spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); + LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); +} +EXPORT_SYMBOL(_spin_lock_nest_lock); unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) { @@ -311,125 +373,130 @@ unsigned long __lockfunc _spin_lock_irqs #endif return flags; } -EXPORT_SYMBOL(_spin_lock_irqsave_nested); - -void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, - struct lockdep_map *nest_lock) -{ - preempt_disable(); - spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); -} -EXPORT_SYMBOL(_spin_lock_nest_lock); +EXPORT_SYMBOL(__spin_lock_irqsave_nested); #endif -void __lockfunc _spin_unlock(spinlock_t *lock) +void __lockfunc __spin_unlock(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); preempt_enable(); } -EXPORT_SYMBOL(_spin_unlock); +EXPORT_SYMBOL(__spin_unlock); -void __lockfunc _write_unlock(rwlock_t *lock) +void __lockfunc __spin_unlock_no_resched(raw_spinlock_t *lock) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + _raw_spin_unlock(lock); + __preempt_enable_no_resched(); +} +/* not exported */ + +void __lockfunc __write_unlock(raw_rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_write_unlock(lock); preempt_enable(); } -EXPORT_SYMBOL(_write_unlock); +EXPORT_SYMBOL(__write_unlock); -void __lockfunc _read_unlock(rwlock_t *lock) +void __lockfunc __read_unlock(raw_rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_read_unlock(lock); preempt_enable(); } -EXPORT_SYMBOL(_read_unlock); +EXPORT_SYMBOL(__read_unlock); -void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) +void __lockfunc __spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); + __preempt_enable_no_resched(); local_irq_restore(flags); - preempt_enable(); + preempt_check_resched(); } -EXPORT_SYMBOL(_spin_unlock_irqrestore); +EXPORT_SYMBOL(__spin_unlock_irqrestore); -void __lockfunc _spin_unlock_irq(spinlock_t *lock) +void __lockfunc __spin_unlock_irq(raw_spinlock_t *lock) { 
spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); + __preempt_enable_no_resched(); local_irq_enable(); - preempt_enable(); + preempt_check_resched(); } -EXPORT_SYMBOL(_spin_unlock_irq); +EXPORT_SYMBOL(__spin_unlock_irq); -void __lockfunc _spin_unlock_bh(spinlock_t *lock) +void __lockfunc __spin_unlock_bh(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); _raw_spin_unlock(lock); - preempt_enable_no_resched(); + __preempt_enable_no_resched(); local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } -EXPORT_SYMBOL(_spin_unlock_bh); +EXPORT_SYMBOL(__spin_unlock_bh); -void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +void __lockfunc __read_unlock_irqrestore(raw_rwlock_t *lock, unsigned long flags) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_read_unlock(lock); + __preempt_enable_no_resched(); local_irq_restore(flags); - preempt_enable(); + preempt_check_resched(); } -EXPORT_SYMBOL(_read_unlock_irqrestore); +EXPORT_SYMBOL(__read_unlock_irqrestore); -void __lockfunc _read_unlock_irq(rwlock_t *lock) +void __lockfunc __read_unlock_irq(raw_rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_read_unlock(lock); + __preempt_enable_no_resched(); local_irq_enable(); - preempt_enable(); + preempt_check_resched(); } -EXPORT_SYMBOL(_read_unlock_irq); +EXPORT_SYMBOL(__read_unlock_irq); -void __lockfunc _read_unlock_bh(rwlock_t *lock) +void __lockfunc __read_unlock_bh(raw_rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_read_unlock(lock); - preempt_enable_no_resched(); + __preempt_enable_no_resched(); local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } -EXPORT_SYMBOL(_read_unlock_bh); +EXPORT_SYMBOL(__read_unlock_bh); -void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +void __lockfunc __write_unlock_irqrestore(raw_rwlock_t *lock, unsigned long flags) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_write_unlock(lock); + __preempt_enable_no_resched(); local_irq_restore(flags); - preempt_enable(); + preempt_check_resched(); } -EXPORT_SYMBOL(_write_unlock_irqrestore); +EXPORT_SYMBOL(__write_unlock_irqrestore); -void __lockfunc _write_unlock_irq(rwlock_t *lock) +void __lockfunc __write_unlock_irq(raw_rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_write_unlock(lock); + __preempt_enable_no_resched(); local_irq_enable(); - preempt_enable(); + preempt_check_resched(); } -EXPORT_SYMBOL(_write_unlock_irq); +EXPORT_SYMBOL(__write_unlock_irq); -void __lockfunc _write_unlock_bh(rwlock_t *lock) +void __lockfunc __write_unlock_bh(raw_rwlock_t *lock) { rwlock_release(&lock->dep_map, 1, _RET_IP_); _raw_write_unlock(lock); - preempt_enable_no_resched(); + __preempt_enable_no_resched(); local_bh_enable_ip((unsigned long)__builtin_return_address(0)); } -EXPORT_SYMBOL(_write_unlock_bh); +EXPORT_SYMBOL(__write_unlock_bh); -int __lockfunc _spin_trylock_bh(spinlock_t *lock) +int __lockfunc __spin_trylock_bh(raw_spinlock_t *lock) { local_bh_disable(); preempt_disable(); @@ -438,11 +505,11 @@ int __lockfunc _spin_trylock_bh(spinlock return 1; } - preempt_enable_no_resched(); + __preempt_enable_no_resched(); local_bh_enable_ip((unsigned long)__builtin_return_address(0)); return 0; } -EXPORT_SYMBOL(_spin_trylock_bh); +EXPORT_SYMBOL(__spin_trylock_bh); notrace int in_lock_functions(unsigned long addr) { @@ -450,6 +517,17 @@ notrace int in_lock_functions(unsigned l extern char __lock_text_start[], __lock_text_end[]; return addr >= (unsigned long)__lock_text_start 
- && addr < (unsigned long)__lock_text_end; + && addr < (unsigned long)__lock_text_end; } EXPORT_SYMBOL(in_lock_functions); + +void notrace __debug_atomic_dec_and_test(atomic_t *v) +{ + static int warn_once = 1; + + if (!atomic_read(v) && warn_once) { + warn_once = 0; + printk("BUG: atomic counter underflow!\n"); + WARN_ON(1); + } +} Index: linux-2.6-tip/lib/dec_and_lock.c =================================================================== --- linux-2.6-tip.orig/lib/dec_and_lock.c +++ linux-2.6-tip/lib/dec_and_lock.c @@ -17,7 +17,7 @@ * because the spin-lock and the decrement must be * "atomic". */ -int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) +int __atomic_dec_and_spin_lock(raw_spinlock_t *lock, atomic_t *atomic) { #ifdef CONFIG_SMP /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ @@ -32,4 +32,4 @@ int _atomic_dec_and_lock(atomic_t *atomi return 0; } -EXPORT_SYMBOL(_atomic_dec_and_lock); +EXPORT_SYMBOL(__atomic_dec_and_spin_lock); Index: linux-2.6-tip/lib/kernel_lock.c =================================================================== --- linux-2.6-tip.orig/lib/kernel_lock.c +++ linux-2.6-tip/lib/kernel_lock.c @@ -24,7 +24,7 @@ * * Don't use in new code. */ -static DECLARE_MUTEX(kernel_sem); +DECLARE_MUTEX(kernel_sem); /* * Re-acquire the kernel semaphore. @@ -44,7 +44,7 @@ int __lockfunc __reacquire_kernel_lock(v BUG_ON(saved_lock_depth < 0); task->lock_depth = -1; - preempt_enable_no_resched(); + __preempt_enable_no_resched(); down(&kernel_sem); Index: linux-2.6-tip/lib/locking-selftest.c =================================================================== --- linux-2.6-tip.orig/lib/locking-selftest.c +++ linux-2.6-tip/lib/locking-selftest.c @@ -940,6 +940,9 @@ static void dotest(void (*testcase_fn)(v { unsigned long saved_preempt_count = preempt_count(); int expected_failure = 0; +#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_DEBUG_RT_MUTEXES) + int saved_lock_count = current->lock_count; +#endif WARN_ON(irqs_disabled()); @@ -989,6 +992,9 @@ static void dotest(void (*testcase_fn)(v #endif reset_locks(); +#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_DEBUG_RT_MUTEXES) + current->lock_count = saved_lock_count; +#endif } static inline void print_testname(const char *testname) Index: linux-2.6-tip/lib/plist.c =================================================================== --- linux-2.6-tip.orig/lib/plist.c +++ linux-2.6-tip/lib/plist.c @@ -54,7 +54,9 @@ static void plist_check_list(struct list static void plist_check_head(struct plist_head *head) { +#ifndef CONFIG_PREEMPT_RT WARN_ON(!head->lock); +#endif if (head->lock) WARN_ON_SMP(!spin_is_locked(head->lock)); plist_check_list(&head->prio_list); Index: linux-2.6-tip/lib/rwsem-spinlock.c =================================================================== --- linux-2.6-tip.orig/lib/rwsem-spinlock.c +++ linux-2.6-tip/lib/rwsem-spinlock.c @@ -20,7 +20,7 @@ struct rwsem_waiter { /* * initialise the semaphore */ -void __init_rwsem(struct rw_semaphore *sem, const char *name, +void __compat_init_rwsem(struct compat_rw_semaphore *sem, const char *name, struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -44,8 +44,8 @@ void __init_rwsem(struct rw_semaphore *s * - woken process blocks are discarded from the list after having task zeroed * - writers are only woken if wakewrite is non-zero */ -static inline struct rw_semaphore * -__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) +static inline struct compat_rw_semaphore * +__rwsem_do_wake(struct compat_rw_semaphore *sem, int 
wakewrite) { struct rwsem_waiter *waiter; struct task_struct *tsk; @@ -103,8 +103,8 @@ __rwsem_do_wake(struct rw_semaphore *sem /* * wake a single writer */ -static inline struct rw_semaphore * -__rwsem_wake_one_writer(struct rw_semaphore *sem) +static inline struct compat_rw_semaphore * +__rwsem_wake_one_writer(struct compat_rw_semaphore *sem) { struct rwsem_waiter *waiter; struct task_struct *tsk; @@ -125,7 +125,7 @@ __rwsem_wake_one_writer(struct rw_semaph /* * get a read lock on the semaphore */ -void __sched __down_read(struct rw_semaphore *sem) +void __sched __down_read(struct compat_rw_semaphore *sem) { struct rwsem_waiter waiter; struct task_struct *tsk; @@ -168,7 +168,7 @@ void __sched __down_read(struct rw_semap /* * trylock for reading -- returns 1 if successful, 0 if contention */ -int __down_read_trylock(struct rw_semaphore *sem) +int __down_read_trylock(struct compat_rw_semaphore *sem) { unsigned long flags; int ret = 0; @@ -191,7 +191,8 @@ int __down_read_trylock(struct rw_semaph * get a write lock on the semaphore * - we increment the waiting count anyway to indicate an exclusive lock */ -void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) +void __sched +__down_write_nested(struct compat_rw_semaphore *sem, int subclass) { struct rwsem_waiter waiter; struct task_struct *tsk; @@ -231,7 +232,7 @@ void __sched __down_write_nested(struct ; } -void __sched __down_write(struct rw_semaphore *sem) +void __sched __down_write(struct compat_rw_semaphore *sem) { __down_write_nested(sem, 0); } @@ -239,7 +240,7 @@ void __sched __down_write(struct rw_sema /* * trylock for writing -- returns 1 if successful, 0 if contention */ -int __down_write_trylock(struct rw_semaphore *sem) +int __down_write_trylock(struct compat_rw_semaphore *sem) { unsigned long flags; int ret = 0; @@ -260,7 +261,7 @@ int __down_write_trylock(struct rw_semap /* * release a read lock on the semaphore */ -void __up_read(struct rw_semaphore *sem) +void __up_read(struct compat_rw_semaphore *sem) { unsigned long flags; @@ -275,7 +276,7 @@ void __up_read(struct rw_semaphore *sem) /* * release a write lock on the semaphore */ -void __up_write(struct rw_semaphore *sem) +void __up_write(struct compat_rw_semaphore *sem) { unsigned long flags; @@ -292,7 +293,7 @@ void __up_write(struct rw_semaphore *sem * downgrade a write lock into a read lock * - just wake up any readers at the front of the queue */ -void __downgrade_write(struct rw_semaphore *sem) +void __downgrade_write(struct compat_rw_semaphore *sem) { unsigned long flags; @@ -305,7 +306,7 @@ void __downgrade_write(struct rw_semapho spin_unlock_irqrestore(&sem->wait_lock, flags); } -EXPORT_SYMBOL(__init_rwsem); +EXPORT_SYMBOL(__compat_init_rwsem); EXPORT_SYMBOL(__down_read); EXPORT_SYMBOL(__down_read_trylock); EXPORT_SYMBOL(__down_write_nested); Index: linux-2.6-tip/lib/rwsem.c =================================================================== --- linux-2.6-tip.orig/lib/rwsem.c +++ linux-2.6-tip/lib/rwsem.c @@ -11,8 +11,8 @@ /* * Initialize an rwsem: */ -void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key) +void __compat_init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -26,7 +26,7 @@ void __init_rwsem(struct rw_semaphore *s INIT_LIST_HEAD(&sem->wait_list); } -EXPORT_SYMBOL(__init_rwsem); +EXPORT_SYMBOL(__compat_init_rwsem); struct rwsem_waiter { struct list_head list; Index: linux-2.6-tip/lib/spinlock_debug.c 
=================================================================== --- linux-2.6-tip.orig/lib/spinlock_debug.c +++ linux-2.6-tip/lib/spinlock_debug.c @@ -13,8 +13,8 @@ #include #include -void __spin_lock_init(spinlock_t *lock, const char *name, - struct lock_class_key *key) +void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, + struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -23,16 +23,16 @@ void __spin_lock_init(spinlock_t *lock, debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + lock->raw_lock = (__raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; lock->magic = SPINLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; } -EXPORT_SYMBOL(__spin_lock_init); +EXPORT_SYMBOL(__raw_spin_lock_init); -void __rwlock_init(rwlock_t *lock, const char *name, - struct lock_class_key *key) +void __raw_rwlock_init(raw_rwlock_t *lock, const char *name, + struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -41,15 +41,15 @@ void __rwlock_init(rwlock_t *lock, const debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; + lock->raw_lock = (__raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; lock->magic = RWLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; } -EXPORT_SYMBOL(__rwlock_init); +EXPORT_SYMBOL(__raw_rwlock_init); -static void spin_bug(spinlock_t *lock, const char *msg) +static void spin_bug(raw_spinlock_t *lock, const char *msg) { struct task_struct *owner = NULL; @@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, c #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) static inline void -debug_spin_lock_before(spinlock_t *lock) +debug_spin_lock_before(raw_spinlock_t *lock) { SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); SPIN_BUG_ON(lock->owner == current, lock, "recursion"); @@ -81,13 +81,13 @@ debug_spin_lock_before(spinlock_t *lock) lock, "cpu recursion"); } -static inline void debug_spin_lock_after(spinlock_t *lock) +static inline void debug_spin_lock_after(raw_spinlock_t *lock) { lock->owner_cpu = raw_smp_processor_id(); lock->owner = current; } -static inline void debug_spin_unlock(spinlock_t *lock) +static inline void debug_spin_unlock(raw_spinlock_t *lock) { SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked"); @@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spi lock->owner_cpu = -1; } -static void __spin_lock_debug(spinlock_t *lock) +static void __spin_lock_debug(raw_spinlock_t *lock) { u64 i; u64 loops = loops_per_jiffy * HZ; @@ -125,7 +125,7 @@ static void __spin_lock_debug(spinlock_t } } -void _raw_spin_lock(spinlock_t *lock) +void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) { debug_spin_lock_before(lock); if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) @@ -133,7 +133,7 @@ void _raw_spin_lock(spinlock_t *lock) debug_spin_lock_after(lock); } -int _raw_spin_trylock(spinlock_t *lock) +int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) { int ret = __raw_spin_trylock(&lock->raw_lock); @@ -148,13 +148,13 @@ int _raw_spin_trylock(spinlock_t *lock) return ret; } -void _raw_spin_unlock(spinlock_t *lock) +void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) { debug_spin_unlock(lock); __raw_spin_unlock(&lock->raw_lock); } -static void rwlock_bug(rwlock_t *lock, const 
char *msg) +static void rwlock_bug(raw_rwlock_t *lock, const char *msg) { if (!debug_locks_off()) return; @@ -167,8 +167,8 @@ static void rwlock_bug(rwlock_t *lock, c #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) -#if 0 /* __write_lock_debug() can lock up - maybe this can too? */ -static void __read_lock_debug(rwlock_t *lock) +#if 1 /* __write_lock_debug() can lock up - maybe this can too? */ +static void __raw_read_lock_debug(raw_rwlock_t *lock) { u64 i; u64 loops = loops_per_jiffy * HZ; @@ -193,13 +193,13 @@ static void __read_lock_debug(rwlock_t * } #endif -void _raw_read_lock(rwlock_t *lock) +void __lockfunc _raw_read_lock(raw_rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); - __raw_read_lock(&lock->raw_lock); + __raw_read_lock_debug(lock); } -int _raw_read_trylock(rwlock_t *lock) +int __lockfunc _raw_read_trylock(raw_rwlock_t *lock) { int ret = __raw_read_trylock(&lock->raw_lock); @@ -212,13 +212,13 @@ int _raw_read_trylock(rwlock_t *lock) return ret; } -void _raw_read_unlock(rwlock_t *lock) +void __lockfunc _raw_read_unlock(raw_rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); __raw_read_unlock(&lock->raw_lock); } -static inline void debug_write_lock_before(rwlock_t *lock) +static inline void debug_write_lock_before(raw_rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); RWLOCK_BUG_ON(lock->owner == current, lock, "recursion"); @@ -226,13 +226,13 @@ static inline void debug_write_lock_befo lock, "cpu recursion"); } -static inline void debug_write_lock_after(rwlock_t *lock) +static inline void debug_write_lock_after(raw_rwlock_t *lock) { lock->owner_cpu = raw_smp_processor_id(); lock->owner = current; } -static inline void debug_write_unlock(rwlock_t *lock) +static inline void debug_write_unlock(raw_rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner"); @@ -242,8 +242,8 @@ static inline void debug_write_unlock(rw lock->owner_cpu = -1; } -#if 0 /* This can cause lockups */ -static void __write_lock_debug(rwlock_t *lock) +#if 1 /* This can cause lockups */ +static void __raw_write_lock_debug(raw_rwlock_t *lock) { u64 i; u64 loops = loops_per_jiffy * HZ; @@ -268,14 +268,14 @@ static void __write_lock_debug(rwlock_t } #endif -void _raw_write_lock(rwlock_t *lock) +void __lockfunc _raw_write_lock(raw_rwlock_t *lock) { debug_write_lock_before(lock); - __raw_write_lock(&lock->raw_lock); + __raw_write_lock_debug(lock); debug_write_lock_after(lock); } -int _raw_write_trylock(rwlock_t *lock) +int __lockfunc _raw_write_trylock(raw_rwlock_t *lock) { int ret = __raw_write_trylock(&lock->raw_lock); @@ -290,7 +290,7 @@ int _raw_write_trylock(rwlock_t *lock) return ret; } -void _raw_write_unlock(rwlock_t *lock) +void __lockfunc _raw_write_unlock(raw_rwlock_t *lock) { debug_write_unlock(lock); __raw_write_unlock(&lock->raw_lock); patches/rt-mutex-core-fixes.patch0000664000076400007640000000434311160544611016110 0ustar tglxtglxSubject: rt: mutex core fixes From: Ingo Molnar Date: Wed Feb 04 02:20:51 CET 2009 Signed-off-by: Ingo Molnar --- arch/x86/include/asm/rwsem.h | 2 +- include/linux/irqflags.h | 3 +++ include/linux/spinlock.h | 3 +++ kernel/sched.c | 6 ------ 4 files changed, 7 insertions(+), 7 deletions(-) Index: linux-2.6-tip/arch/x86/include/asm/rwsem.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/rwsem.h +++ 
linux-2.6-tip/arch/x86/include/asm/rwsem.h @@ -257,7 +257,7 @@ static inline int rwsem_atomic_update(in return tmp + delta; } -static inline int rwsem_is_locked(struct compat_rw_semaphore *sem) +static inline int compat_rwsem_is_locked(struct compat_rw_semaphore *sem) { return (sem->count != 0); } Index: linux-2.6-tip/include/linux/irqflags.h =================================================================== --- linux-2.6-tip.orig/include/linux/irqflags.h +++ linux-2.6-tip/include/linux/irqflags.h @@ -13,6 +13,9 @@ #include +/* dummy wrapper for now: */ +#define BUILD_CHECK_IRQ_FLAGS(flags) + #ifdef CONFIG_TRACE_IRQFLAGS extern void trace_softirqs_on(unsigned long ip); extern void trace_softirqs_off(unsigned long ip); Index: linux-2.6-tip/include/linux/spinlock.h =================================================================== --- linux-2.6-tip.orig/include/linux/spinlock.h +++ linux-2.6-tip/include/linux/spinlock.h @@ -613,4 +613,7 @@ static inline int bit_spin_is_locked(int __cond_lock(lock, PICK_SPIN_OP_RET(__spin_can_lock, _spin_can_lock,\ lock)) +/* FIXME: porting hack! */ +#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) + #endif /* __LINUX_SPINLOCK_H */ Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -5158,12 +5158,6 @@ void complete_all(struct completion *x) } EXPORT_SYMBOL(complete_all); -unsigned int completion_done(struct completion *x) -{ - return x->done; -} -EXPORT_SYMBOL(completion_done); - static inline long __sched do_wait_for_common(struct completion *x, long timeout, int state) { patches/rt-mutex-core-mutex-fixes.patch0000664000076400007640000000506711160544610017253 0ustar tglxtglxSubject: add -rt extra-version From: Ingo Molnar add -rt extra-version. 
Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner Signed-off-by: Steven Rostedt --- include/linux/mutex.h | 4 ++-- kernel/rt.c | 6 +++--- kernel/sched.c | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) Index: linux-2.6-tip/include/linux/mutex.h =================================================================== --- linux-2.6-tip.orig/include/linux/mutex.h +++ linux-2.6-tip/include/linux/mutex.h @@ -38,7 +38,7 @@ struct mutex { struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) extern void -_mutex_init(struct mutex *lock, char *name, struct lock_class_key *key); +__mutex_init(struct mutex *lock, char *name, struct lock_class_key *key); extern void __lockfunc _mutex_lock(struct mutex *lock); extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); @@ -75,7 +75,7 @@ extern void __lockfunc _mutex_unlock(str do { \ static struct lock_class_key __key; \ \ - _mutex_init((mutex), #mutex, &__key); \ + __mutex_init((mutex), #mutex, &__key); \ } while (0) #else Index: linux-2.6-tip/kernel/rt.c =================================================================== --- linux-2.6-tip.orig/kernel/rt.c +++ linux-2.6-tip/kernel/rt.c @@ -83,7 +83,7 @@ void zap_rt_locks(void) /* * struct mutex functions */ -void _mutex_init(struct mutex *lock, char *name, struct lock_class_key *key) +void __mutex_init(struct mutex *lock, char *name, struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -94,7 +94,7 @@ void _mutex_init(struct mutex *lock, cha #endif __rt_mutex_init(&lock->lock, name); } -EXPORT_SYMBOL(_mutex_init); +EXPORT_SYMBOL(__mutex_init); void __lockfunc _mutex_lock(struct mutex *lock) { @@ -550,7 +550,7 @@ int rt_down_timeout(struct semaphore *se do { jiffies_to_timespec(jiff, &ts); hrtimer_init_on_stack(&t.timer, HRTIMER_MODE_REL, CLOCK_MONOTONIC); - t.timer.expires = timespec_to_ktime(ts); + t.timer._expires = timespec_to_ktime(ts); ret = rt_mutex_timed_lock(&sem->lock, &t, 0); if (ret != -EINTR) Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -4889,7 +4889,7 @@ need_resched: } EXPORT_SYMBOL(schedule); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT) /* * Look out! "owner" is an entirely speculative pointer * access and not reliable. 
patches/rt-mutex-core-fixes2.patch0000664000076400007640000000337511150327144016175 0ustar tglxtglxSubject: mingo: rt spinlock fix From: Ingo Molnar Date: Sun Feb 08 17:10:09 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/spinlock_api_smp.h | 2 +- kernel/spinlock.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) Index: linux-2.6-tip/include/linux/spinlock_api_smp.h =================================================================== --- linux-2.6-tip.orig/include/linux/spinlock_api_smp.h +++ linux-2.6-tip/include/linux/spinlock_api_smp.h @@ -19,7 +19,7 @@ int in_lock_functions(unsigned long addr #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) -void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) +void __lockfunc __spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) __acquires(lock); #define ACQUIRE_SPIN __acquires(lock) #define ACQUIRE_RW __acquires(lock) Index: linux-2.6-tip/kernel/spinlock.c =================================================================== --- linux-2.6-tip.orig/kernel/spinlock.c +++ linux-2.6-tip/kernel/spinlock.c @@ -345,16 +345,16 @@ void __lockfunc __spin_lock_nested(raw_s } EXPORT_SYMBOL(__spin_lock_nested); -void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, +void __lockfunc __spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *nest_lock) { preempt_disable(); spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); } -EXPORT_SYMBOL(_spin_lock_nest_lock); +EXPORT_SYMBOL(__spin_lock_nest_lock); -unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) +unsigned long __lockfunc __spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) { unsigned long flags; patches/rt-mutex-fix-non-lock-kernel-config.patch0000664000076400007640000000167011150327144021070 0ustar tglxtglxSubject: rt: mingo kernel_sem fix From: Ingo Molnar Date: Sun Feb 08 19:33:10 CET 2009 Signed-off-by: Ingo Molnar --- kernel/rtmutex.c | 4 ++++ 1 file changed, 4 insertions(+) Index: linux-2.6-tip/kernel/rtmutex.c =================================================================== --- linux-2.6-tip.orig/kernel/rtmutex.c +++ linux-2.6-tip/kernel/rtmutex.c @@ -889,6 +889,7 @@ static inline int rt_release_bkl(struct { int saved_lock_depth = current->lock_depth; +#ifdef CONFIG_LOCK_KERNEL current->lock_depth = -1; /* * try_to_take_lock set the waiters, make sure it's @@ -900,14 +901,17 @@ static inline int rt_release_bkl(struct up(&kernel_sem); spin_lock_irq(&lock->wait_lock); +#endif return saved_lock_depth; } static inline void rt_reacquire_bkl(int saved_lock_depth) { +#ifdef CONFIG_LOCK_KERNEL down(&kernel_sem); current->lock_depth = saved_lock_depth; +#endif } /* patches/slub-compile-fix.patch0000664000076400007640000000425311150327144015437 0ustar tglxtglxSubject: -rt, bit-spinlocks: add __bit_spin_unlock() From: Ingo Molnar Date: Sun Feb 08 07:16:24 CET 2009 This commit: b8dc93c: bit_spin_lock: use lock bitops - introduced __bit_spin_unlock() => add that too - changed the clear-bit primitives to the _locked version => update Signed-off-by: Ingo Molnar --- include/linux/spinlock.h | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) Index: linux-2.6-tip/include/linux/spinlock.h =================================================================== --- linux-2.6-tip.orig/include/linux/spinlock.h +++ linux-2.6-tip/include/linux/spinlock.h @@ -531,7 +531,7 @@ static inline void bit_spin_lock(int bit * 
attempt to acquire the lock bit. */ #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) - while (test_and_set_bit(bitnum, addr)) + while (unlikely(test_and_set_bit_lock(bitnum, addr))) while (test_bit(bitnum, addr)) cpu_relax(); #endif @@ -544,7 +544,7 @@ static inline void bit_spin_lock(int bit static inline int bit_spin_trylock(int bitnum, unsigned long *addr) { #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) - if (test_and_set_bit(bitnum, addr)) + if (unlikely(test_and_set_bit_lock(bitnum, addr))) return 0; #endif __acquire(bitlock); @@ -552,14 +552,29 @@ static inline int bit_spin_trylock(int b } /* - * bit-based spin_unlock() + * bit-based spin_unlock(): */ static inline void bit_spin_unlock(int bitnum, unsigned long *addr) { #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) +# ifdef CONFIG_DEBUG_SPINLOCK BUG_ON(!test_bit(bitnum, addr)); - smp_mb__before_clear_bit(); - clear_bit(bitnum, addr); +# endif + clear_bit_unlock(bitnum, addr); +#endif + __release(bitlock); +} + +/* + * bit-based spin_unlock() - non-atomic version: + */ +static inline void __bit_spin_unlock(int bitnum, unsigned long *addr) +{ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) +# ifdef CONFIG_DEBUG_SPINLOCK + BUG_ON(!test_bit(bitnum, addr)); +# endif + __clear_bit_unlock(bitnum, addr); #endif __release(bitlock); } patches/rt-mutex-compat-semaphores.patch0000664000076400007640000001514711150327144017476 0ustar tglxtglxSubject: patches/rt-mutex-compat-semaphores.patch Signed-off-by: Ingo Molnar --- drivers/acpi/osl.c | 12 ++++++------ drivers/media/dvb/dvb-core/dvb_frontend.c | 2 +- drivers/net/3c527.c | 2 +- drivers/net/hamradio/6pack.c | 2 +- drivers/net/hamradio/mkiss.c | 2 +- drivers/net/ppp_async.c | 2 +- drivers/pci/hotplug/ibmphp_hpc.c | 2 +- drivers/scsi/aacraid/aacraid.h | 2 +- include/linux/parport.h | 2 +- 9 files changed, 14 insertions(+), 14 deletions(-) Index: linux-2.6-tip/drivers/acpi/osl.c =================================================================== --- linux-2.6-tip.orig/drivers/acpi/osl.c +++ linux-2.6-tip/drivers/acpi/osl.c @@ -799,12 +799,12 @@ void acpi_os_delete_lock(acpi_spinlock h acpi_status acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) { - struct semaphore *sem = NULL; + struct compat_semaphore *sem = NULL; - sem = acpi_os_allocate(sizeof(struct semaphore)); + sem = acpi_os_allocate(sizeof(struct compat_semaphore)); if (!sem) return AE_NO_MEMORY; - memset(sem, 0, sizeof(struct semaphore)); + memset(sem, 0, sizeof(struct compat_semaphore)); sema_init(sem, initial_units); @@ -825,7 +825,7 @@ acpi_os_create_semaphore(u32 max_units, acpi_status acpi_os_delete_semaphore(acpi_handle handle) { - struct semaphore *sem = (struct semaphore *)handle; + struct compat_semaphore *sem = (struct compat_semaphore *)handle; if (!sem) return AE_BAD_PARAMETER; @@ -845,7 +845,7 @@ acpi_status acpi_os_delete_semaphore(acp acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) { acpi_status status = AE_OK; - struct semaphore *sem = (struct semaphore *)handle; + struct compat_semaphore *sem = (struct compat_semaphore *)handle; long jiffies; int ret = 0; @@ -886,7 +886,7 @@ acpi_status acpi_os_wait_semaphore(acpi_ */ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) { - struct semaphore *sem = (struct semaphore *)handle; + struct compat_semaphore *sem = (struct compat_semaphore *)handle; if 
(!sem || (units < 1)) return AE_BAD_PARAMETER; Index: linux-2.6-tip/drivers/media/dvb/dvb-core/dvb_frontend.c =================================================================== --- linux-2.6-tip.orig/drivers/media/dvb/dvb-core/dvb_frontend.c +++ linux-2.6-tip/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -101,7 +101,7 @@ struct dvb_frontend_private { struct dvb_device *dvbdev; struct dvb_frontend_parameters parameters; struct dvb_fe_events events; - struct semaphore sem; + struct compat_semaphore sem; struct list_head list_head; wait_queue_head_t wait_queue; struct task_struct *thread; Index: linux-2.6-tip/drivers/net/3c527.c =================================================================== --- linux-2.6-tip.orig/drivers/net/3c527.c +++ linux-2.6-tip/drivers/net/3c527.c @@ -181,7 +181,7 @@ struct mc32_local u16 rx_ring_tail; /* index to rx de-queue end */ - struct semaphore cmd_mutex; /* Serialises issuing of execute commands */ + struct compat_semaphore cmd_mutex; /* Serialises issuing of execute commands */ struct completion execution_cmd; /* Card has completed an execute command */ struct completion xceiver_cmd; /* Card has completed a tx or rx command */ }; Index: linux-2.6-tip/drivers/net/hamradio/6pack.c =================================================================== --- linux-2.6-tip.orig/drivers/net/hamradio/6pack.c +++ linux-2.6-tip/drivers/net/hamradio/6pack.c @@ -120,7 +120,7 @@ struct sixpack { struct timer_list tx_t; struct timer_list resync_t; atomic_t refcnt; - struct semaphore dead_sem; + struct compat_semaphore dead_sem; spinlock_t lock; }; Index: linux-2.6-tip/drivers/net/hamradio/mkiss.c =================================================================== --- linux-2.6-tip.orig/drivers/net/hamradio/mkiss.c +++ linux-2.6-tip/drivers/net/hamradio/mkiss.c @@ -84,7 +84,7 @@ struct mkiss { #define CRC_MODE_SMACK_TEST 4 atomic_t refcnt; - struct semaphore dead_sem; + struct compat_semaphore dead_sem; }; /*---------------------------------------------------------------------------*/ Index: linux-2.6-tip/drivers/net/ppp_async.c =================================================================== --- linux-2.6-tip.orig/drivers/net/ppp_async.c +++ linux-2.6-tip/drivers/net/ppp_async.c @@ -67,7 +67,7 @@ struct asyncppp { struct tasklet_struct tsk; atomic_t refcnt; - struct semaphore dead_sem; + struct compat_semaphore dead_sem; struct ppp_channel chan; /* interface to generic ppp layer */ unsigned char obuf[OBUFSIZE]; }; Index: linux-2.6-tip/drivers/pci/hotplug/ibmphp_hpc.c =================================================================== --- linux-2.6-tip.orig/drivers/pci/hotplug/ibmphp_hpc.c +++ linux-2.6-tip/drivers/pci/hotplug/ibmphp_hpc.c @@ -104,7 +104,7 @@ static int to_debug = 0; static struct mutex sem_hpcaccess; // lock access to HPC static struct semaphore semOperations; // lock all operations and // access to data structures -static struct semaphore sem_exit; // make sure polling thread goes away +static struct compat_semaphore sem_exit; // make sure polling thread goes away static struct task_struct *ibmphp_poll_thread; //---------------------------------------------------------------------------- // local function prototypes Index: linux-2.6-tip/drivers/scsi/aacraid/aacraid.h =================================================================== --- linux-2.6-tip.orig/drivers/scsi/aacraid/aacraid.h +++ linux-2.6-tip/drivers/scsi/aacraid/aacraid.h @@ -719,7 +719,7 @@ struct aac_fib_context { u32 unique; // unique value representing this context ulong jiffies; // 
used for cleanup - dmb changed to ulong struct list_head next; // used to link context's into a linked list - struct semaphore wait_sem; // this is used to wait for the next fib to arrive. + struct compat_semaphore wait_sem; // this is used to wait for the next fib to arrive. int wait; // Set to true when thread is in WaitForSingleObject unsigned long count; // total number of FIBs on FibList struct list_head fib_list; // this holds fibs and their attachd hw_fibs Index: linux-2.6-tip/include/linux/parport.h =================================================================== --- linux-2.6-tip.orig/include/linux/parport.h +++ linux-2.6-tip/include/linux/parport.h @@ -264,7 +264,7 @@ enum ieee1284_phase { struct ieee1284_info { int mode; volatile enum ieee1284_phase phase; - struct semaphore irq; + struct compat_semaphore irq; }; /* A parallel port */ patches/per-cpu-locked-infrastructure.patch0000664000076400007640000000473211155204467020161 0ustar tglxtglxSubject: per: cpu locked infrastructure From: Ingo Molnar Date: Mon Feb 09 23:43:22 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/percpu.h | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) Index: linux-2.6-tip/include/linux/percpu.h =================================================================== --- linux-2.6-tip.orig/include/linux/percpu.h +++ linux-2.6-tip/include/linux/percpu.h @@ -37,9 +37,17 @@ __attribute__((__section__(PER_CPU_BASE_SECTION section))) \ PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name +#define DEFINE_PER_CPU_SPINLOCK(name, section) \ + __attribute__((__section__(PER_CPU_BASE_SECTION section))) \ + PER_CPU_ATTRIBUTES __DEFINE_SPINLOCK(per_cpu__lock_##name##_locked); + #define DEFINE_PER_CPU(type, name) \ DEFINE_PER_CPU_SECTION(type, name, "") +#define DEFINE_PER_CPU_LOCKED(type, name) \ + DEFINE_PER_CPU_SPINLOCK(name, "") \ + DEFINE_PER_CPU_SECTION(type, name##_locked, "") + #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ ____cacheline_aligned_in_smp @@ -51,7 +59,9 @@ DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) +#define EXPORT_PER_CPU_LOCKED_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var##_locked) #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) +#define EXPORT_PER_CPU_LOCKED_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var##_locked) /* enough to cover all DEFINE_PER_CPUs in modules */ #ifdef CONFIG_MODULES @@ -76,6 +86,29 @@ &__get_cpu_var(var); })) #define put_cpu_var(var) preempt_enable() +/* + * Per-CPU data structures with an additional lock - useful for + * PREEMPT_RT code that wants to reschedule but also wants + * per-CPU data structures. + * + * 'cpu' gets updated with the CPU the task is currently executing on. + * + * NOTE: on normal !PREEMPT_RT kernels these per-CPU variables + * are the same as the normal per-CPU variables, so there no + * runtime overhead. 
+ */ +#define get_cpu_var_locked(var, cpuptr) \ +(*({ \ + int __cpu = raw_smp_processor_id(); \ + \ + *(cpuptr) = __cpu; \ + spin_lock(&__get_cpu_lock(var, __cpu)); \ + &__get_cpu_var_locked(var, __cpu); \ +})) + +#define put_cpu_var_locked(var, cpu) \ + do { (void)cpu; spin_unlock(&__get_cpu_lock(var, cpu)); } while (0) + #ifdef CONFIG_SMP #ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA patches/percpu-locked-mm.patch0000664000076400007640000002056111155204613015424 0ustar tglxtglxSubject: patches/percpu-locked-mm.patch Signed-off-by: Ingo Molnar --- arch/x86/mm/init_32.c | 2 - arch/x86/mm/init_64.c | 2 - include/asm-generic/percpu.h | 18 ++++++++++ include/asm-generic/tlb.h | 9 +++-- mm/swap.c | 72 ++++++++++++++++++++++++++++++++++--------- 5 files changed, 82 insertions(+), 21 deletions(-) Index: linux-2.6-tip/arch/x86/mm/init_32.c =================================================================== --- linux-2.6-tip.orig/arch/x86/mm/init_32.c +++ linux-2.6-tip/arch/x86/mm/init_32.c @@ -54,7 +54,7 @@ unsigned long max_low_pfn_mapped; unsigned long max_pfn_mapped; -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); +DEFINE_PER_CPU_LOCKED(struct mmu_gather, mmu_gathers); unsigned long highstart_pfn, highend_pfn; static noinline int do_test_wp_bit(void); Index: linux-2.6-tip/arch/x86/mm/init_64.c =================================================================== --- linux-2.6-tip.orig/arch/x86/mm/init_64.c +++ linux-2.6-tip/arch/x86/mm/init_64.c @@ -60,7 +60,7 @@ unsigned long max_pfn_mapped; static unsigned long dma_reserve __initdata; -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); +DEFINE_PER_CPU_LOCKED(struct mmu_gather, mmu_gathers); static int __init parse_direct_gbpages_off(char *arg) { Index: linux-2.6-tip/include/asm-generic/percpu.h =================================================================== --- linux-2.6-tip.orig/include/asm-generic/percpu.h +++ linux-2.6-tip/include/asm-generic/percpu.h @@ -9,6 +9,9 @@ */ #define per_cpu_var(var) per_cpu__##var +#define __per_cpu_var_lock(var) per_cpu__lock_##var##_locked +#define __per_cpu_var_lock_var(var) per_cpu__##var##_locked + #ifdef CONFIG_SMP /* @@ -60,6 +63,14 @@ extern unsigned long __per_cpu_offset[NR #define __raw_get_cpu_var(var) \ (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset)) +#define per_cpu_lock(var, cpu) \ + (*SHIFT_PERCPU_PTR(&__per_cpu_var_lock(var), per_cpu_offset(cpu))) +#define per_cpu_var_locked(var, cpu) \ + (*SHIFT_PERCPU_PTR(&__per_cpu_var_lock_var(var), per_cpu_offset(cpu))) +#define __get_cpu_lock(var, cpu) \ + per_cpu_lock(var, cpu) +#define __get_cpu_var_locked(var, cpu) \ + per_cpu_var_locked(var, cpu) #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA extern void setup_per_cpu_areas(void); @@ -68,9 +79,11 @@ extern void setup_per_cpu_areas(void); #else /* ! 
SMP */ #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) +#define per_cpu_var_locked(var, cpu) (*((void)(cpu), &__per_cpu_var_lock_var(var))) #define __get_cpu_var(var) per_cpu_var(var) #define __raw_get_cpu_var(var) per_cpu_var(var) - +#define __get_cpu_lock(var, cpu) __per_cpu_var_lock(var) +#define __get_cpu_var_locked(var, cpu) __per_cpu_var_lock_var(var) #endif /* SMP */ #ifndef PER_CPU_ATTRIBUTES @@ -79,6 +92,9 @@ extern void setup_per_cpu_areas(void); #define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \ __typeof__(type) per_cpu_var(name) +#define DECLARE_PER_CPU_LOCKED(type, name) \ + extern PER_CPU_ATTRIBUTES spinlock_t __per_cpu_var_lock(name); \ + extern PER_CPU_ATTRIBUTES __typeof__(type) __per_cpu_var_lock_var(name) /* * Optional methods for optimized non-lvalue per-cpu variable access. Index: linux-2.6-tip/include/asm-generic/tlb.h =================================================================== --- linux-2.6-tip.orig/include/asm-generic/tlb.h +++ linux-2.6-tip/include/asm-generic/tlb.h @@ -41,11 +41,12 @@ struct mmu_gather { unsigned int nr; /* set to ~0U means fast mode */ unsigned int need_flush;/* Really unmapped some ptes? */ unsigned int fullmm; /* non-zero means full mm flush */ + int cpu; struct page * pages[FREE_PTE_NR]; }; /* Users of the generic TLB shootdown code must declare this storage space. */ -DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); +DECLARE_PER_CPU_LOCKED(struct mmu_gather, mmu_gathers); /* tlb_gather_mmu * Return a pointer to an initialized struct mmu_gather. @@ -53,8 +54,10 @@ DECLARE_PER_CPU(struct mmu_gather, mmu_g static inline struct mmu_gather * tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) { - struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); + int cpu; + struct mmu_gather *tlb = &get_cpu_var_locked(mmu_gathers, &cpu); + tlb->cpu = cpu; tlb->mm = mm; /* Use fast mode if only one CPU is online */ @@ -90,7 +93,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, u /* keep the page table cache within bounds */ check_pgt_cache(); - put_cpu_var(mmu_gathers); + put_cpu_var_locked(mmu_gathers, tlb->cpu); } /* tlb_remove_page Index: linux-2.6-tip/mm/swap.c =================================================================== --- linux-2.6-tip.orig/mm/swap.c +++ linux-2.6-tip/mm/swap.c @@ -30,14 +30,49 @@ #include #include #include +#include #include "internal.h" /* How many pages do we try to swap or page in/out together? */ int page_cluster; -static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs); -static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); +/* + * On PREEMPT_RT we don't want to disable preemption for cpu variables. + * We grab a cpu and then use that cpu to lock the variables accordingly. + * + * (On !PREEMPT_RT this turns into normal preempt-off sections, as before.) 
+ */ +static DEFINE_PER_CPU_LOCKED(struct pagevec[NR_LRU_LISTS], lru_add_pvecs); +static DEFINE_PER_CPU_LOCKED(struct pagevec, lru_rotate_pvecs); + +#define swap_get_cpu_var_irq_save(var, flags, cpu) \ + ({ \ + (void)flags; \ + &get_cpu_var_locked(var, &cpu); \ + }) + +#define swap_put_cpu_var_irq_restore(var, flags, cpu) \ + put_cpu_var_locked(var, cpu) + +#define swap_get_cpu_var(var, cpu) \ + &get_cpu_var_locked(var, &cpu) + +#define swap_put_cpu_var(var, cpu) \ + put_cpu_var_locked(var, cpu) + +#define swap_per_cpu_lock(var, cpu) \ + ({ \ + spin_lock(&__get_cpu_lock(var, cpu)); \ + &__get_cpu_var_locked(var, cpu); \ + }) + +#define swap_per_cpu_unlock(var, cpu) \ + spin_unlock(&__get_cpu_lock(var, cpu)); + +#define swap_get_cpu() raw_smp_processor_id() + +#define swap_put_cpu() /* * This path almost never happens for VM activity - pages are normally @@ -141,13 +176,13 @@ void rotate_reclaimable_page(struct pag !PageUnevictable(page) && PageLRU(page)) { struct pagevec *pvec; unsigned long flags; + int cpu; page_cache_get(page); - local_irq_save(flags); - pvec = &__get_cpu_var(lru_rotate_pvecs); + pvec = swap_get_cpu_var_irq_save(lru_rotate_pvecs, flags, cpu); if (!pagevec_add(pvec, page)) pagevec_move_tail(pvec); - local_irq_restore(flags); + swap_put_cpu_var_irq_restore(lru_rotate_pvecs, flags, cpu); } } @@ -216,12 +251,14 @@ EXPORT_SYMBOL(mark_page_accessed); void __lru_cache_add(struct page *page, enum lru_list lru) { - struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru]; + struct pagevec *pvec; + int cpu; + pvec = swap_get_cpu_var(lru_add_pvecs, cpu)[lru]; page_cache_get(page); if (!pagevec_add(pvec, page)) ____pagevec_lru_add(pvec, lru); - put_cpu_var(lru_add_pvecs); + swap_put_cpu_var(lru_add_pvecs, cpu); } /** @@ -271,31 +308,36 @@ void add_page_to_unevictable_list(struct */ static void drain_cpu_pagevecs(int cpu) { - struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu); - struct pagevec *pvec; + struct pagevec *pvecs, *pvec; int lru; + pvecs = swap_per_cpu_lock(lru_add_pvecs, cpu)[0]; for_each_lru(lru) { pvec = &pvecs[lru - LRU_BASE]; if (pagevec_count(pvec)) ____pagevec_lru_add(pvec, lru); } + swap_per_cpu_unlock(lru_add_pvecs, cpu); - pvec = &per_cpu(lru_rotate_pvecs, cpu); + pvec = swap_per_cpu_lock(lru_rotate_pvecs, cpu); if (pagevec_count(pvec)) { unsigned long flags; /* No harm done if a racing interrupt already did this */ - local_irq_save(flags); + local_irq_save_nort(flags); pagevec_move_tail(pvec); - local_irq_restore(flags); + local_irq_restore_nort(flags); } + swap_per_cpu_unlock(lru_rotate_pvecs, cpu); } void lru_add_drain(void) { - drain_cpu_pagevecs(get_cpu()); - put_cpu(); + int cpu; + + cpu = swap_get_cpu(); + drain_cpu_pagevecs(cpu); + swap_put_cpu(); } static void lru_add_drain_per_cpu(struct work_struct *dummy) @@ -369,7 +411,7 @@ void release_pages(struct page **pages, } __pagevec_free(&pages_to_free); pagevec_reinit(&pages_to_free); - } + } } if (zone) spin_unlock_irqrestore(&zone->lru_lock, flags); patches/drain-all-local-pages-via-sched.patch0000664000076400007640000000357511150327752020164 0ustar tglxtglxSubject: drain: all local pages via sched From: Ingo Molnar Date: Wed Feb 04 16:56:11 CET 2009 Signed-off-by: Ingo Molnar --- mm/page_alloc.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) Index: linux-2.6-tip/mm/page_alloc.c =================================================================== --- linux-2.6-tip.orig/mm/page_alloc.c +++ linux-2.6-tip/mm/page_alloc.c @@ -1000,12 +1000,50 @@ void drain_local_pages(void *arg) 
drain_pages(smp_processor_id()); } +static void drain_local_pages_work(struct work_struct *wrk) +{ + drain_pages(smp_processor_id()); +} + /* * Spill all the per-cpu pages from all CPUs back into the buddy allocator */ void drain_all_pages(void) { +#ifdef CONFIG_PREEMPT_RT + /* + * HACK!!!!! + * For RT we can't use IPIs to run drain_local_pages, since + * that code will call spin_locks that will now sleep. + * But, schedule_on_each_cpu will call kzalloc, which will + * call page_alloc which was what calls this. + * + * Luckily, there's a condition to get here, and that is if + * the order passed in to alloc_pages is greater than 0 + * (alloced more than a page size). The slabs only allocate + * what is needed, and the allocation made by schedule_on_each_cpu + * does an alloc of "sizeof(void *)*nr_cpu_ids". + * + * So we can safely call schedule_on_each_cpu if that number + * is less than a page. Otherwise don't bother. At least warn of + * this issue. + * + * And yes, this is one big hack. Please fix ;-) + */ + if (sizeof(void *)*nr_cpu_ids < PAGE_SIZE) + schedule_on_each_cpu(drain_local_pages_work); + else { + static int once; + if (!once) { + printk(KERN_ERR "Can't drain all CPUS due to possible recursion\n"); + once = 1; + } + drain_local_pages(NULL); + } + +#else on_each_cpu(drain_local_pages, NULL, 1); +#endif } #ifdef CONFIG_HIBERNATION patches/rt-page_alloc.c-cleanup.patch0000664000076400007640000000130211150327752016634 0ustar tglxtglxSubject: rt: page_alloc.c cleanup From: Ingo Molnar Date: Sun Feb 08 15:59:46 CET 2009 Signed-off-by: Ingo Molnar --- mm/page_alloc.c | 2 ++ 1 file changed, 2 insertions(+) Index: linux-2.6-tip/mm/page_alloc.c =================================================================== --- linux-2.6-tip.orig/mm/page_alloc.c +++ linux-2.6-tip/mm/page_alloc.c @@ -1000,10 +1000,12 @@ void drain_local_pages(void *arg) drain_pages(smp_processor_id()); } +#ifdef CONFIG_PREEMPT_RT static void drain_local_pages_work(struct work_struct *wrk) { drain_pages(smp_processor_id()); } +#endif /* * Spill all the per-cpu pages from all CPUs back into the buddy allocator patches/percpu-locked-netfilter.patch0000664000076400007640000001046211150327144017006 0ustar tglxtglxSubject: patches/percpu-locked-netfilter.patch Signed-off-by: Ingo Molnar --- --- net/core/flow.c | 22 ++++++++++++++-------- net/ipv4/netfilter/arp_tables.c | 4 ++-- net/ipv4/netfilter/ip_tables.c | 2 +- 3 files changed, 17 insertions(+), 11 deletions(-) Index: linux-2.6-tip/net/core/flow.c =================================================================== --- linux-2.6-tip.orig/net/core/flow.c +++ linux-2.6-tip/net/core/flow.c @@ -39,9 +39,10 @@ atomic_t flow_cache_genid = ATOMIC_INIT( static u32 flow_hash_shift; #define flow_hash_size (1 << flow_hash_shift) -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL }; -#define flow_table(cpu) (per_cpu(flow_tables, cpu)) +static DEFINE_PER_CPU_LOCKED(struct flow_cache_entry **, flow_tables); + +#define flow_table(cpu) (per_cpu_var_locked(flow_tables, cpu)) static struct kmem_cache *flow_cachep __read_mostly; @@ -168,24 +169,24 @@ static int flow_key_compare(struct flowi void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, flow_resolve_t resolver) { - struct flow_cache_entry *fle, **head = NULL /* shut up GCC */; + struct flow_cache_entry **table, *fle, **head = NULL /* shut up GCC */; unsigned int hash; int cpu; local_bh_disable(); - cpu = smp_processor_id(); + table = get_cpu_var_locked(flow_tables, &cpu); fle = NULL; 
/* Packet really early in init? Making flow_cache_init a * pre-smp initcall would solve this. --RR */ - if (!flow_table(cpu)) + if (!table) goto nocache; if (flow_hash_rnd_recalc(cpu)) flow_new_hash_rnd(cpu); hash = flow_hash_code(key, cpu); - head = &flow_table(cpu)[hash]; + head = &table[hash]; for (fle = *head; fle; fle = fle->next) { if (fle->family == family && fle->dir == dir && @@ -195,6 +196,7 @@ void *flow_cache_lookup(struct net *net, if (ret) atomic_inc(fle->object_ref); + put_cpu_var_locked(flow_tables, cpu); local_bh_enable(); return ret; @@ -220,6 +222,8 @@ void *flow_cache_lookup(struct net *net, } nocache: + put_cpu_var_locked(flow_tables, cpu); + { int err; void *obj; @@ -249,14 +253,15 @@ nocache: static void flow_cache_flush_tasklet(unsigned long data) { struct flow_flush_info *info = (void *)data; + struct flow_cache_entry **table; int i; int cpu; - cpu = smp_processor_id(); + table = get_cpu_var_locked(flow_tables, &cpu); for (i = 0; i < flow_hash_size; i++) { struct flow_cache_entry *fle; - fle = flow_table(cpu)[i]; + fle = table[i]; for (; fle; fle = fle->next) { unsigned genid = atomic_read(&flow_cache_genid); @@ -267,6 +272,7 @@ static void flow_cache_flush_tasklet(uns atomic_dec(fle->object_ref); } } + put_cpu_var_locked(flow_tables, cpu); if (atomic_dec_and_test(&info->cpuleft)) complete(&info->completion); Index: linux-2.6-tip/net/ipv4/netfilter/arp_tables.c =================================================================== --- linux-2.6-tip.orig/net/ipv4/netfilter/arp_tables.c +++ linux-2.6-tip/net/ipv4/netfilter/arp_tables.c @@ -239,7 +239,7 @@ unsigned int arpt_do_table(struct sk_buf read_lock_bh(&table->lock); private = table->private; - table_base = (void *)private->entries[smp_processor_id()]; + table_base = (void *)private->entries[raw_smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); back = get_entry(table_base, private->underflow[hook]); @@ -1157,7 +1157,7 @@ static int do_add_counters(struct net *n i = 0; /* Choose the copy that is on our node */ - loc_cpu_entry = private->entries[smp_processor_id()]; + loc_cpu_entry = private->entries[raw_smp_processor_id()]; ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size, add_counter_to_entry, Index: linux-2.6-tip/net/ipv4/netfilter/ip_tables.c =================================================================== --- linux-2.6-tip.orig/net/ipv4/netfilter/ip_tables.c +++ linux-2.6-tip/net/ipv4/netfilter/ip_tables.c @@ -350,7 +350,7 @@ ipt_do_table(struct sk_buff *skb, read_lock_bh(&table->lock); IP_NF_ASSERT(table->valid_hooks & (1 << hook)); private = table->private; - table_base = (void *)private->entries[smp_processor_id()]; + table_base = (void *)private->entries[raw_smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); /* For return from builtin chain */ patches/net-core-preempt-fix.patch0000664000076400007640000000113511150327144016226 0ustar tglxtglxSubject: net: core preempt fix From: Ingo Molnar Date: Wed Feb 04 00:03:01 CET 2009 Signed-off-by: Ingo Molnar --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/net/core/dev.c =================================================================== --- linux-2.6-tip.orig/net/core/dev.c +++ linux-2.6-tip/net/core/dev.c @@ -1986,8 +1986,8 @@ int netif_rx_ni(struct sk_buff *skb) { int err; - preempt_disable(); err = netif_rx(skb); + preempt_disable(); if (local_softirq_pending()) do_softirq(); preempt_enable(); 
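For reference, a minimal usage sketch of the per-CPU locked helpers added by per-cpu-locked-infrastructure.patch, in the same style as the mmu_gathers and flow_tables conversions above. This sketch is not part of the series; the example_stats structure and example_account_hit() function are invented purely for illustration, and only the DEFINE_PER_CPU_LOCKED()/get_cpu_var_locked()/put_cpu_var_locked() names come from the patches themselves.

/*
 * Hypothetical illustration only -- shows the intended calling pattern
 * of the per-CPU locked accessors.  The data structure and function
 * names are made up for this example.
 */
struct example_stats {
	unsigned long hits;
	unsigned long misses;
};

/* One instance plus one spinlock per CPU: */
static DEFINE_PER_CPU_LOCKED(struct example_stats, example_stats);

static void example_account_hit(void)
{
	struct example_stats *stats;
	int cpu;

	/*
	 * Takes the per-CPU spinlock of the CPU we are currently on and
	 * records that CPU in 'cpu'.  On PREEMPT_RT the lock is a sleeping
	 * lock and the task may migrate while holding it, which is why the
	 * matching put must be given the recorded 'cpu' rather than
	 * re-reading smp_processor_id().  On !PREEMPT_RT this collapses to
	 * an ordinary preempt-safe per-CPU access.
	 */
	stats = &get_cpu_var_locked(example_stats, &cpu);

	stats->hits++;

	put_cpu_var_locked(example_stats, cpu);
}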
patches/bh-uptodate-lock.patch0000664000076400007640000001117411150327752015427 0ustar tglxtglxSubject: patches/bh-uptodate-lock.patch Signed-off-by: Ingo Molnar --- fs/buffer.c | 20 ++++++++------------ fs/ntfs/aops.c | 9 +++------ include/linux/buffer_head.h | 5 +---- 3 files changed, 12 insertions(+), 22 deletions(-) Index: linux-2.6-tip/fs/buffer.c =================================================================== --- linux-2.6-tip.orig/fs/buffer.c +++ linux-2.6-tip/fs/buffer.c @@ -469,8 +469,7 @@ static void end_buffer_async_read(struct * decide that the page is now completely done. */ first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + spin_lock_irqsave(&first->b_uptodate_lock, flags); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; @@ -483,8 +482,7 @@ static void end_buffer_async_read(struct } tmp = tmp->b_this_page; } while (tmp != bh); - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + spin_unlock_irqrestore(&first->b_uptodate_lock, flags); /* * If none of the buffers had errors and they are all @@ -496,8 +494,7 @@ static void end_buffer_async_read(struct return; still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + spin_unlock_irqrestore(&first->b_uptodate_lock, flags); return; } @@ -532,8 +529,7 @@ static void end_buffer_async_write(struc } first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + spin_lock_irqsave(&first->b_uptodate_lock, flags); clear_buffer_async_write(bh); unlock_buffer(bh); @@ -545,14 +541,12 @@ static void end_buffer_async_write(struc } tmp = tmp->b_this_page; } - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + spin_unlock_irqrestore(&first->b_uptodate_lock, flags); end_page_writeback(page); return; still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + spin_unlock_irqrestore(&first->b_uptodate_lock, flags); return; } @@ -3302,6 +3296,7 @@ struct buffer_head *alloc_buffer_head(gf struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); if (ret) { INIT_LIST_HEAD(&ret->b_assoc_buffers); + spin_lock_init(&ret->b_uptodate_lock); get_cpu_var(bh_accounting).nr++; recalc_bh_state(); put_cpu_var(bh_accounting); @@ -3313,6 +3308,7 @@ EXPORT_SYMBOL(alloc_buffer_head); void free_buffer_head(struct buffer_head *bh) { BUG_ON(!list_empty(&bh->b_assoc_buffers)); + BUG_ON(spin_is_locked(&bh->b_uptodate_lock)); kmem_cache_free(bh_cachep, bh); get_cpu_var(bh_accounting).nr--; recalc_bh_state(); Index: linux-2.6-tip/fs/ntfs/aops.c =================================================================== --- linux-2.6-tip.orig/fs/ntfs/aops.c +++ linux-2.6-tip/fs/ntfs/aops.c @@ -107,8 +107,7 @@ static void ntfs_end_buffer_async_read(s "0x%llx.", (unsigned long long)bh->b_blocknr); } first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + spin_lock_irqsave(&first->b_uptodate_lock, flags); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; @@ -123,8 +122,7 @@ static void ntfs_end_buffer_async_read(s } tmp = tmp->b_this_page; } while (tmp != bh); - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + spin_unlock_irqrestore(&first->b_uptodate_lock, flags); /* * If none of the buffers had errors then we can set the page uptodate, * but we first have to perform the post read mst fixups, if the @@ -159,8 +157,7 @@ static void 
ntfs_end_buffer_async_read(s unlock_page(page); return; still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + spin_unlock_irqrestore(&first->b_uptodate_lock, flags); return; } Index: linux-2.6-tip/include/linux/buffer_head.h =================================================================== --- linux-2.6-tip.orig/include/linux/buffer_head.h +++ linux-2.6-tip/include/linux/buffer_head.h @@ -21,10 +21,6 @@ enum bh_state_bits { BH_Dirty, /* Is dirty */ BH_Lock, /* Is locked */ BH_Req, /* Has been submitted for I/O */ - BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise - * IO completion of other buffers in the page - */ - BH_Mapped, /* Has a disk mapping */ BH_New, /* Disk mapping was newly created by get_block */ BH_Async_Read, /* Is under end_buffer_async_read I/O */ @@ -74,6 +70,7 @@ struct buffer_head { struct address_space *b_assoc_map; /* mapping this buffer is associated with */ atomic_t b_count; /* users using this buffer_head */ + spinlock_t b_uptodate_lock; }; /* patches/bh-state-lock.patch0000664000076400007640000000606311150327751014722 0ustar tglxtglxSubject: patches/bh-state-lock.patch I was compiling a kernel in a shell that I set to a priority of 20, and it locked up on the bit_spin_lock crap of jbd. This patch adds another spinlock to the buffer head and uses that instead of the bit_spins. From: Steven Rostedt Signed-off-by: Ingo Molnar -- fs/buffer.c | 3 ++- include/linux/buffer_head.h | 1 + include/linux/jbd.h | 12 ++++++------ 3 files changed, 9 insertions(+), 7 deletions(-) Index: linux-2.6-tip/fs/buffer.c =================================================================== --- linux-2.6-tip.orig/fs/buffer.c +++ linux-2.6-tip/fs/buffer.c @@ -40,7 +40,6 @@ #include #include #include -#include static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); @@ -3297,6 +3296,7 @@ struct buffer_head *alloc_buffer_head(gf if (ret) { INIT_LIST_HEAD(&ret->b_assoc_buffers); spin_lock_init(&ret->b_uptodate_lock); + spin_lock_init(&ret->b_state_lock); get_cpu_var(bh_accounting).nr++; recalc_bh_state(); put_cpu_var(bh_accounting); @@ -3309,6 +3309,7 @@ void free_buffer_head(struct buffer_head { BUG_ON(!list_empty(&bh->b_assoc_buffers)); BUG_ON(spin_is_locked(&bh->b_uptodate_lock)); + BUG_ON(spin_is_locked(&bh->b_state_lock)); kmem_cache_free(bh_cachep, bh); get_cpu_var(bh_accounting).nr--; recalc_bh_state(); Index: linux-2.6-tip/include/linux/buffer_head.h =================================================================== --- linux-2.6-tip.orig/include/linux/buffer_head.h +++ linux-2.6-tip/include/linux/buffer_head.h @@ -71,6 +71,7 @@ struct buffer_head { associated with */ atomic_t b_count; /* users using this buffer_head */ spinlock_t b_uptodate_lock; + spinlock_t b_state_lock; }; /* Index: linux-2.6-tip/include/linux/jbd.h =================================================================== --- linux-2.6-tip.orig/include/linux/jbd.h +++ linux-2.6-tip/include/linux/jbd.h @@ -315,32 +315,32 @@ static inline struct journal_head *bh2jh static inline void jbd_lock_bh_state(struct buffer_head *bh) { - bit_spin_lock(BH_State, &bh->b_state); + spin_lock(&bh->b_state_lock); } static inline int jbd_trylock_bh_state(struct buffer_head *bh) { - return bit_spin_trylock(BH_State, &bh->b_state); + return spin_trylock(&bh->b_state_lock); } static inline int jbd_is_locked_bh_state(struct buffer_head *bh) { - return bit_spin_is_locked(BH_State, &bh->b_state); + return spin_is_locked(&bh->b_state_lock); } static inline void 
jbd_unlock_bh_state(struct buffer_head *bh) { - bit_spin_unlock(BH_State, &bh->b_state); + spin_unlock(&bh->b_state_lock); } static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) { - bit_spin_lock(BH_JournalHead, &bh->b_state); + spin_lock_irq(&bh->b_uptodate_lock); } static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) { - bit_spin_unlock(BH_JournalHead, &bh->b_state); + spin_unlock_irq(&bh->b_uptodate_lock); } struct jbd_revoke_table_s; patches/jbd_assertions_smp_only.patch0000664000076400007640000000404111150327144017204 0ustar tglxtglxSubject: patches/jbd_assertions_smp_only.patch Signed-off-by: Ingo Molnar --- fs/jbd/transaction.c | 6 +++--- include/linux/jbd.h | 9 +++++++++ 2 files changed, 12 insertions(+), 3 deletions(-) Index: linux-2.6-tip/fs/jbd/transaction.c =================================================================== --- linux-2.6-tip.orig/fs/jbd/transaction.c +++ linux-2.6-tip/fs/jbd/transaction.c @@ -1582,7 +1582,7 @@ static void __journal_temp_unlink_buffer transaction_t *transaction; struct buffer_head *bh = jh2bh(jh); - J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); + J_ASSERT_JH_SMP(jh, jbd_is_locked_bh_state(bh)); transaction = jh->b_transaction; if (transaction) assert_spin_locked(&transaction->t_journal->j_list_lock); @@ -2077,7 +2077,7 @@ void __journal_file_buffer(struct journa int was_dirty = 0; struct buffer_head *bh = jh2bh(jh); - J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); + J_ASSERT_JH_SMP(jh, jbd_is_locked_bh_state(bh)); assert_spin_locked(&transaction->t_journal->j_list_lock); J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); @@ -2166,7 +2166,7 @@ void __journal_refile_buffer(struct jour int was_dirty; struct buffer_head *bh = jh2bh(jh); - J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); + J_ASSERT_JH_SMP(jh, jbd_is_locked_bh_state(bh)); if (jh->b_transaction) assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock); Index: linux-2.6-tip/include/linux/jbd.h =================================================================== --- linux-2.6-tip.orig/include/linux/jbd.h +++ linux-2.6-tip/include/linux/jbd.h @@ -260,6 +260,15 @@ void buffer_assertion_failure(struct buf #define J_ASSERT_JH(jh, expr) J_ASSERT(expr) #endif +/* + * For assertions that are only valid on SMP (e.g. spin_is_locked()): + */ +#ifdef CONFIG_SMP +# define J_ASSERT_JH_SMP(jh, expr) J_ASSERT_JH(jh, expr) +#else +# define J_ASSERT_JH_SMP(jh, assert) do { } while (0) +#endif + #if defined(JBD_PARANOID_IOFAIL) #define J_EXPECT(expr, why...) J_ASSERT(expr) #define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr) patches/tasklet-redesign.patch0000664000076400007640000002257711157735270015550 0ustar tglxtglxSubject: patches/tasklet-redesign.patch From: Ingo Molnar tasklet redesign: make it saner and make it easier to thread. Signed-off-by: Ingo Molnar ---- include/linux/interrupt.h | 33 ++++---- kernel/softirq.c | 184 ++++++++++++++++++++++++++++++++-------------- 2 files changed, 149 insertions(+), 68 deletions(-) Index: linux-2.6-tip/include/linux/interrupt.h =================================================================== --- linux-2.6-tip.orig/include/linux/interrupt.h +++ linux-2.6-tip/include/linux/interrupt.h @@ -342,8 +342,9 @@ extern void __send_remote_softirq(struct to be executed on some cpu at least once after this. * If the tasklet is already scheduled, but its excecution is still not started, it will be executed only once. - * If this tasklet is already running on another CPU (or schedule is called - from tasklet itself), it is rescheduled for later. 
+ * If this tasklet is already running on another CPU, it is rescheduled + for later. + * Schedule must not be called from the tasklet itself (a lockup occurs) * Tasklet is strictly serialized wrt itself, but not wrt another tasklets. If client needs some intertask synchronization, he makes it with spinlocks. @@ -368,15 +369,25 @@ struct tasklet_struct name = { NULL, 0, enum { TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ - TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ + TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ + TASKLET_STATE_PENDING /* Tasklet is pending */ }; -#ifdef CONFIG_SMP +#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) +#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) +#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) + +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) static inline int tasklet_trylock(struct tasklet_struct *t) { return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); } +static inline int tasklet_tryunlock(struct tasklet_struct *t) +{ + return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; +} + static inline void tasklet_unlock(struct tasklet_struct *t) { smp_mb__before_clear_bit(); @@ -389,6 +400,7 @@ static inline void tasklet_unlock_wait(s } #else #define tasklet_trylock(t) 1 +#define tasklet_tryunlock(t) 1 #define tasklet_unlock_wait(t) do { } while (0) #define tasklet_unlock(t) do { } while (0) #endif @@ -437,17 +449,8 @@ static inline void tasklet_disable(struc smp_mb(); } -static inline void tasklet_enable(struct tasklet_struct *t) -{ - smp_mb__before_atomic_dec(); - atomic_dec(&t->count); -} - -static inline void tasklet_hi_enable(struct tasklet_struct *t) -{ - smp_mb__before_atomic_dec(); - atomic_dec(&t->count); -} +extern void tasklet_enable(struct tasklet_struct *t); +extern void tasklet_hi_enable(struct tasklet_struct *t); extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); Index: linux-2.6-tip/kernel/softirq.c =================================================================== --- linux-2.6-tip.orig/kernel/softirq.c +++ linux-2.6-tip/kernel/softirq.c @@ -425,15 +425,45 @@ struct tasklet_head static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); +static void inline +__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) +{ + if (tasklet_trylock(t)) { +again: + /* We may have been preempted before tasklet_trylock + * and __tasklet_action may have already run. + * So double check the sched bit while the takslet + * is locked before adding it to the list. + */ + if (test_bit(TASKLET_STATE_SCHED, &t->state)) { + t->next = NULL; + *head->tail = t; + head->tail = &(t->next); + raise_softirq_irqoff(nr); + tasklet_unlock(t); + } else { + /* This is subtle. If we hit the corner case above + * It is possible that we get preempted right here, + * and another task has successfully called + * tasklet_schedule(), then this function, and + * failed on the trylock. Thus we must be sure + * before releasing the tasklet lock, that the + * SCHED_BIT is clear. 
Otherwise the tasklet + * may get its SCHED_BIT set, but not added to the + * list + */ + if (!tasklet_tryunlock(t)) + goto again; + } + } +} + void __tasklet_schedule(struct tasklet_struct *t) { unsigned long flags; local_irq_save(flags); - t->next = NULL; - *__get_cpu_var(tasklet_vec).tail = t; - __get_cpu_var(tasklet_vec).tail = &(t->next); - raise_softirq_irqoff(TASKLET_SOFTIRQ); + __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ); local_irq_restore(flags); } @@ -444,10 +474,7 @@ void __tasklet_hi_schedule(struct taskle unsigned long flags; local_irq_save(flags); - t->next = NULL; - *__get_cpu_var(tasklet_hi_vec).tail = t; - __get_cpu_var(tasklet_hi_vec).tail = &(t->next); - raise_softirq_irqoff(HI_SOFTIRQ); + __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), HI_SOFTIRQ); local_irq_restore(flags); } @@ -464,74 +491,125 @@ void __tasklet_hi_schedule_first(struct EXPORT_SYMBOL(__tasklet_hi_schedule_first); -static void tasklet_action(struct softirq_action *a) +void tasklet_enable(struct tasklet_struct *t) { - struct tasklet_struct *list; + if (!atomic_dec_and_test(&t->count)) + return; + if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) + tasklet_schedule(t); +} - local_irq_disable(); - list = __get_cpu_var(tasklet_vec).head; - __get_cpu_var(tasklet_vec).head = NULL; - __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head; - local_irq_enable(); +EXPORT_SYMBOL(tasklet_enable); + +void tasklet_hi_enable(struct tasklet_struct *t) +{ + if (!atomic_dec_and_test(&t->count)) + return; + if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) + tasklet_hi_schedule(t); +} + +EXPORT_SYMBOL(tasklet_hi_enable); + +static void +__tasklet_action(struct softirq_action *a, struct tasklet_struct *list) +{ + int loops = 1000000; while (list) { struct tasklet_struct *t = list; list = list->next; - if (tasklet_trylock(t)) { - if (!atomic_read(&t->count)) { - if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) - BUG(); - t->func(t->data); - tasklet_unlock(t); - continue; - } - tasklet_unlock(t); + /* + * Should always succeed - after a tasklist got on the + * list (after getting the SCHED bit set from 0 to 1), + * nothing but the tasklet softirq it got queued to can + * lock it: + */ + if (!tasklet_trylock(t)) { + WARN_ON(1); + continue; } - local_irq_disable(); t->next = NULL; - *__get_cpu_var(tasklet_vec).tail = t; - __get_cpu_var(tasklet_vec).tail = &(t->next); - __do_raise_softirq_irqoff(TASKLET_SOFTIRQ); - local_irq_enable(); + + /* + * If we cannot handle the tasklet because it's disabled, + * mark it as pending. tasklet_enable() will later + * re-schedule the tasklet. + */ + if (unlikely(atomic_read(&t->count))) { +out_disabled: + /* implicit unlock: */ + wmb(); + t->state = TASKLET_STATEF_PENDING; + continue; + } + + /* + * After this point on the tasklet might be rescheduled + * on another CPU, but it can only be added to another + * CPU's tasklet list if we unlock the tasklet (which we + * dont do yet). + */ + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) + WARN_ON(1); + +again: + t->func(t->data); + + /* + * Try to unlock the tasklet. We must use cmpxchg, because + * another CPU might have scheduled or disabled the tasklet. + * We only allow the STATE_RUN -> 0 transition here. 
+ */ + while (!tasklet_tryunlock(t)) { + /* + * If it got disabled meanwhile, bail out: + */ + if (atomic_read(&t->count)) + goto out_disabled; + /* + * If it got scheduled meanwhile, re-execute + * the tasklet function: + */ + if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) + goto again; + if (!--loops) { + printk("hm, tasklet state: %08lx\n", t->state); + WARN_ON(1); + tasklet_unlock(t); + break; + } + } } } -static void tasklet_hi_action(struct softirq_action *a) +static void tasklet_action(struct softirq_action *a) { struct tasklet_struct *list; local_irq_disable(); - list = __get_cpu_var(tasklet_hi_vec).head; - __get_cpu_var(tasklet_hi_vec).head = NULL; - __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head; + list = __get_cpu_var(tasklet_vec).head; + __get_cpu_var(tasklet_vec).head = NULL; + __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head; local_irq_enable(); - while (list) { - struct tasklet_struct *t = list; + __tasklet_action(a, list); +} - list = list->next; +static void tasklet_hi_action(struct softirq_action *a) +{ + struct tasklet_struct *list; - if (tasklet_trylock(t)) { - if (!atomic_read(&t->count)) { - if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) - BUG(); - t->func(t->data); - tasklet_unlock(t); - continue; - } - tasklet_unlock(t); - } + local_irq_disable(); + list = __get_cpu_var(tasklet_hi_vec).head; + __get_cpu_var(tasklet_hi_vec).head = NULL; + __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_vec).head; + local_irq_enable(); - local_irq_disable(); - t->next = NULL; - *__get_cpu_var(tasklet_hi_vec).tail = t; - __get_cpu_var(tasklet_hi_vec).tail = &(t->next); - __do_raise_softirq_irqoff(HI_SOFTIRQ); - local_irq_enable(); - } + __tasklet_action(a, list); } patches/tasklet-busy-loop-hack.patch0000664000076400007640000000345111157735270016573 0ustar tglxtglxSubject: tasklet: busy loop hack From: Ingo Molnar Date: Wed Feb 04 00:03:00 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/interrupt.h | 6 ++---- kernel/softirq.c | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+), 4 deletions(-) Index: linux-2.6-tip/include/linux/interrupt.h =================================================================== --- linux-2.6-tip.orig/include/linux/interrupt.h +++ linux-2.6-tip/include/linux/interrupt.h @@ -394,10 +394,8 @@ static inline void tasklet_unlock(struct clear_bit(TASKLET_STATE_RUN, &(t)->state); } -static inline void tasklet_unlock_wait(struct tasklet_struct *t) -{ - while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } -} +extern void tasklet_unlock_wait(struct tasklet_struct *t); + #else #define tasklet_trylock(t) 1 #define tasklet_tryunlock(t) 1 Index: linux-2.6-tip/kernel/softirq.c =================================================================== --- linux-2.6-tip.orig/kernel/softirq.c +++ linux-2.6-tip/kernel/softirq.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -783,6 +784,25 @@ void __init softirq_init(void) open_softirq(HI_SOFTIRQ, tasklet_hi_action); } +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) + +void tasklet_unlock_wait(struct tasklet_struct *t) +{ + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { + /* + * Hack for now to avoid this busy-loop: + */ +#ifdef CONFIG_PREEMPT_RT + msleep(1); +#else + barrier(); +#endif + } +} +EXPORT_SYMBOL(tasklet_unlock_wait); + +#endif + static int ksoftirqd(void * __data) { struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2 }; 
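tasklet-redesign.patch above serializes tasklet scheduling and execution through the SCHED/RUN/PENDING state bits and a cmpxchg-based tasklet_tryunlock(): the only way out of the running state is the exact RUN -> 0 transition, and anything scheduled or disabled in the meantime forces another pass. The sketch below is a stand-alone user-space model of that idea, not kernel code; mini_tasklet, tasklet_run() and the bit names are invented for illustration, and only the cmpxchg discipline mirrors the patch.

/*
 * User-space model of the RUN -> 0 try-unlock discipline from
 * tasklet-redesign.patch.  Hypothetical names; C11 atomics stand in
 * for the kernel's cmpxchg().
 */
#include <stdatomic.h>
#include <stdio.h>

#define ST_SCHED   (1u << 0)   /* models TASKLET_STATE_SCHED */
#define ST_RUN     (1u << 1)   /* models TASKLET_STATE_RUN   */

struct mini_tasklet {
	_Atomic unsigned int state;
	int runs;
};

/* Only the exact RUN -> 0 transition may drop the running state. */
static int tasklet_tryunlock(struct mini_tasklet *t)
{
	unsigned int expected = ST_RUN;

	return atomic_compare_exchange_strong(&t->state, &expected, 0u);
}

static void handler(struct mini_tasklet *t)
{
	t->runs++;
}

static void tasklet_run(struct mini_tasklet *t)
{
	for (;;) {
		handler(t);
		if (tasklet_tryunlock(t))
			return;         /* nothing interfered, clean exit */
		/*
		 * Unlock failed: SCHED was set while we were running, so
		 * clear it and run the handler once more -- the same idea
		 * as the "goto again" path in __tasklet_action().
		 */
		atomic_fetch_and(&t->state, ~ST_SCHED);
	}
}

int main(void)
{
	static struct mini_tasklet t;

	/* Simulate a tasklet that got re-scheduled while it was running. */
	atomic_store(&t.state, ST_RUN | ST_SCHED);
	tasklet_run(&t);

	printf("handler ran %d times, final state %u\n",
	       t.runs, (unsigned int)atomic_load(&t.state));
	return 0;
}

Built with gcc -std=c11, this reports that the handler ran twice: once for the original run and once more because SCHED was observed while RUN was still held, which is exactly the behaviour the cmpxchg loop in __tasklet_action() is there to guarantee.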
patches/disable-irqpoll.patch0000664000076400007640000000211311150327144015334 0ustar tglxtglxSubject: patches/disable-irqpoll.patch Signed-off-by: Ingo Molnar --- kernel/irq/spurious.c | 10 ++++++++++ 1 file changed, 10 insertions(+) Index: linux-2.6-tip/kernel/irq/spurious.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/spurious.c +++ linux-2.6-tip/kernel/irq/spurious.c @@ -300,6 +300,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir static int __init irqfixup_setup(char *str) { +#ifdef CONFIG_PREEMPT_RT + printk(KERN_WARNING "irqfixup boot option not supported " + "w/ CONFIG_PREEMPT_RT\n"); + return 1; +#endif irqfixup = 1; printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); printk(KERN_WARNING "This may impact system performance.\n"); @@ -313,6 +318,11 @@ MODULE_PARM_DESC("irqfixup", "0: No fixu static int __init irqpoll_setup(char *str) { +#ifdef CONFIG_PREEMPT_RT + printk(KERN_WARNING "irqpoll boot option not supported " + "w/ CONFIG_PREEMPT_RT\n"); + return 1; +#endif irqfixup = 2; printk(KERN_WARNING "Misrouted IRQ fixup and polling support " "enabled\n"); patches/kstat-add-rt-stats.patch0000664000076400007640000001204311160544606015714 0ustar tglxtglxSubject: add rt stats to /proc/stat From: Thomas Gleixner add RT stats to /proc/stat Signed-off-by: Ingo Molnar fs/proc/stat.c | 24 ++++++++++++++++++------ include/linux/kernel_stat.h | 2 ++ kernel/sched.c | 6 +++++- 3 files changed, 25 insertions(+), 7 deletions(-) Index: linux-2.6-tip/fs/proc/stat.c =================================================================== --- linux-2.6-tip.orig/fs/proc/stat.c +++ linux-2.6-tip/fs/proc/stat.c @@ -23,13 +23,14 @@ static int show_stat(struct seq_file *p, { int i, j; unsigned long jif; - cputime64_t user, nice, system, idle, iowait, irq, softirq, steal; + cputime64_t user_rt, user, nice, system_rt, system, idle, + iowait, irq, softirq, steal; cputime64_t guest; u64 sum = 0; struct timespec boottime; unsigned int per_irq_sum; - user = nice = system = idle = iowait = + user_rt = user = nice = system_rt = system = idle = iowait = irq = softirq = steal = cputime64_zero; guest = cputime64_zero; getboottime(&boottime); @@ -44,6 +45,8 @@ static int show_stat(struct seq_file *p, irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq); softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); + user_rt = cputime64_add(user_rt, kstat_cpu(i).cpustat.user_rt); + system_rt = cputime64_add(system_rt, kstat_cpu(i).cpustat.system_rt); guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); for_each_irq_nr(j) { sum += kstat_irqs_cpu(j, i); @@ -52,7 +55,10 @@ static int show_stat(struct seq_file *p, } sum += arch_irq_stat(); - seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", + user = cputime64_add(user_rt, user); + system = cputime64_add(system_rt, system); + + seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", (unsigned long long)cputime64_to_clock_t(user), (unsigned long long)cputime64_to_clock_t(nice), (unsigned long long)cputime64_to_clock_t(system), @@ -61,13 +67,17 @@ static int show_stat(struct seq_file *p, (unsigned long long)cputime64_to_clock_t(irq), (unsigned long long)cputime64_to_clock_t(softirq), (unsigned long long)cputime64_to_clock_t(steal), + (unsigned long long)cputime64_to_clock_t(user_rt), + (unsigned long long)cputime64_to_clock_t(system_rt), (unsigned long long)cputime64_to_clock_t(guest)); for_each_online_cpu(i) { /* 
Copy values here to work around gcc-2.95.3, gcc-2.96 */ - user = kstat_cpu(i).cpustat.user; + user_rt = kstat_cpu(i).cpustat.user_rt; + system_rt = kstat_cpu(i).cpustat.system_rt; + user = cputime64_add(user_rt, kstat_cpu(i).cpustat.user); nice = kstat_cpu(i).cpustat.nice; - system = kstat_cpu(i).cpustat.system; + system = cputime64_add(system_rt, kstat_cpu(i).cpustat.system); idle = kstat_cpu(i).cpustat.idle; iowait = kstat_cpu(i).cpustat.iowait; irq = kstat_cpu(i).cpustat.irq; @@ -75,7 +85,7 @@ static int show_stat(struct seq_file *p, steal = kstat_cpu(i).cpustat.steal; guest = kstat_cpu(i).cpustat.guest; seq_printf(p, - "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", + "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", i, (unsigned long long)cputime64_to_clock_t(user), (unsigned long long)cputime64_to_clock_t(nice), @@ -85,6 +95,8 @@ static int show_stat(struct seq_file *p, (unsigned long long)cputime64_to_clock_t(irq), (unsigned long long)cputime64_to_clock_t(softirq), (unsigned long long)cputime64_to_clock_t(steal), + (unsigned long long)cputime64_to_clock_t(user_rt), + (unsigned long long)cputime64_to_clock_t(system_rt), (unsigned long long)cputime64_to_clock_t(guest)); } seq_printf(p, "intr %llu", (unsigned long long)sum); Index: linux-2.6-tip/include/linux/kernel_stat.h =================================================================== --- linux-2.6-tip.orig/include/linux/kernel_stat.h +++ linux-2.6-tip/include/linux/kernel_stat.h @@ -23,6 +23,8 @@ struct cpu_usage_stat { cputime64_t idle; cputime64_t iowait; cputime64_t steal; + cputime64_t user_rt; + cputime64_t system_rt; cputime64_t guest; }; Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -4424,7 +4424,9 @@ void account_user_time(struct task_struc /* Add user time to cpustat. */ tmp = cputime_to_cputime64(cputime); - if (TASK_NICE(p) > 0) + if (rt_task(p)) + cpustat->user_rt = cputime64_add(cpustat->user_rt, tmp); + else if (TASK_NICE(p) > 0) cpustat->nice = cputime64_add(cpustat->nice, tmp); else cpustat->user = cputime64_add(cpustat->user, tmp); @@ -4486,6 +4488,8 @@ void account_system_time(struct task_str cpustat->irq = cputime64_add(cpustat->irq, tmp); else if (softirq_count() || (p->flags & PF_SOFTIRQ)) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); + else if (rt_task(p)) + cpustat->system_rt = cputime64_add(cpustat->system_rt, tmp); else cpustat->system = cputime64_add(cpustat->system, tmp); patches/preempt-realtime-warn-and-bug-on.patch0000664000076400007640000000221011150327144020415 0ustar tglxtglxSubject: preempt: realtime warn and bug on From: Ingo Molnar Date: Wed Feb 04 00:02:59 CET 2009 Signed-off-by: Ingo Molnar --- include/asm-generic/bug.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) Index: linux-2.6-tip/include/asm-generic/bug.h =================================================================== --- linux-2.6-tip.orig/include/asm-generic/bug.h +++ linux-2.6-tip/include/asm-generic/bug.h @@ -3,6 +3,8 @@ #include +extern void __WARN_ON(const char *func, const char *file, const int line); + #ifdef CONFIG_BUG #ifdef CONFIG_GENERIC_BUG @@ -139,4 +141,16 @@ __WARN(int condition, const char *fmt, . 
# define WARN_ON_SMP(x) do { } while (0) #endif +#ifdef CONFIG_PREEMPT_RT +# define BUG_ON_RT(c) BUG_ON(c) +# define BUG_ON_NONRT(c) do { } while (0) +# define WARN_ON_RT(condition) WARN_ON(condition) +# define WARN_ON_NONRT(condition) do { } while (0) +#else +# define BUG_ON_RT(c) do { } while (0) +# define BUG_ON_NONRT(c) BUG_ON(c) +# define WARN_ON_RT(condition) do { } while (0) +# define WARN_ON_NONRT(condition) WARN_ON(condition) +#endif + #endif patches/cputimer-thread-rt_A0.patch0000664000076400007640000002004311150327144016313 0ustar tglxtglxSubject: patches/cputimer-thread-rt_A0.patch thanks -john Signed-off-by: John Stultz --- include/linux/init_task.h | 1 include/linux/sched.h | 2 init/main.c | 1 kernel/fork.c | 2 kernel/posix-cpu-timers.c | 169 ++++++++++++++++++++++++++++++++++++++++++++-- 5 files changed, 170 insertions(+), 5 deletions(-) Index: linux-2.6-tip/include/linux/init_task.h =================================================================== --- linux-2.6-tip.orig/include/linux/init_task.h +++ linux-2.6-tip/include/linux/init_task.h @@ -186,6 +186,7 @@ extern struct cred init_cred; .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ .timer_slack_ns = 50000, /* 50 usec default slack */ \ + .posix_timer_list = NULL, \ .pi_lock = RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ .pids = { \ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ Index: linux-2.6-tip/include/linux/sched.h =================================================================== --- linux-2.6-tip.orig/include/linux/sched.h +++ linux-2.6-tip/include/linux/sched.h @@ -1274,6 +1274,8 @@ struct task_struct { struct task_cputime cputime_expires; struct list_head cpu_timers[3]; + struct task_struct* posix_timer_list; + /* process credentials */ const struct cred *real_cred; /* objective and real subjective task * credentials (COW) */ Index: linux-2.6-tip/init/main.c =================================================================== --- linux-2.6-tip.orig/init/main.c +++ linux-2.6-tip/init/main.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include Index: linux-2.6-tip/kernel/fork.c =================================================================== --- linux-2.6-tip.orig/kernel/fork.c +++ linux-2.6-tip/kernel/fork.c @@ -1061,7 +1061,7 @@ static struct task_struct *copy_process( acct_clear_integrals(p); posix_cpu_timers_init(p); - + p->posix_timer_list = NULL; p->lock_depth = -1; /* -1 = no lock */ do_posix_clock_monotonic_gettime(&p->start_time); p->real_start_time = p->start_time; Index: linux-2.6-tip/kernel/posix-cpu-timers.c =================================================================== --- linux-2.6-tip.orig/kernel/posix-cpu-timers.c +++ linux-2.6-tip/kernel/posix-cpu-timers.c @@ -557,7 +557,7 @@ static void arm_timer(struct k_itimer *t p->cpu_timers : p->signal->cpu_timers); head += CPUCLOCK_WHICH(timer->it_clock); - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); spin_lock(&p->sighand->siglock); listpos = head; @@ -745,7 +745,7 @@ int posix_cpu_timer_set(struct k_itimer /* * Disarm any old timer after extracting its expiry time. */ - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); ret = 0; spin_lock(&p->sighand->siglock); @@ -1378,12 +1378,11 @@ static inline int fastpath_timer_check(s * already updated our counts. We need to check if any timers fire now. * Interrupts are disabled. 
*/ -void run_posix_cpu_timers(struct task_struct *tsk) +void __run_posix_cpu_timers(struct task_struct *tsk) { LIST_HEAD(firing); struct k_itimer *timer, *next; - BUG_ON(!irqs_disabled()); /* * The fast path checks that there are no expired thread or thread @@ -1435,6 +1434,162 @@ void run_posix_cpu_timers(struct task_st } } +#include +#include +DEFINE_PER_CPU(struct task_struct *, posix_timer_task); +DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); + +static int posix_cpu_timers_thread(void *data) +{ + int cpu = (long)data; + + BUG_ON(per_cpu(posix_timer_task,cpu) != current); + + while (!kthread_should_stop()) { + struct task_struct *tsk = NULL; + struct task_struct *next = NULL; + + if (cpu_is_offline(cpu)) + goto wait_to_die; + + /* grab task list */ + raw_local_irq_disable(); + tsk = per_cpu(posix_timer_tasklist, cpu); + per_cpu(posix_timer_tasklist, cpu) = NULL; + raw_local_irq_enable(); + + /* its possible the list is empty, just return */ + if (!tsk) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + __set_current_state(TASK_RUNNING); + continue; + } + + /* Process task list */ + while (1) { + /* save next */ + next = tsk->posix_timer_list; + + /* run the task timers, clear its ptr and + * unreference it + */ + __run_posix_cpu_timers(tsk); + tsk->posix_timer_list = NULL; + put_task_struct(tsk); + + /* check if this is the last on the list */ + if (next == tsk) + break; + tsk = next; + } + } + return 0; + +wait_to_die: + /* Wait for kthread_stop */ + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + set_current_state(TASK_INTERRUPTIBLE); + } + __set_current_state(TASK_RUNNING); + return 0; +} + +void run_posix_cpu_timers(struct task_struct *tsk) +{ + unsigned long cpu = smp_processor_id(); + struct task_struct *tasklist; + + BUG_ON(!irqs_disabled()); + if(!per_cpu(posix_timer_task, cpu)) + return; + /* get per-cpu references */ + tasklist = per_cpu(posix_timer_tasklist, cpu); + + /* check to see if we're already queued */ + if (!tsk->posix_timer_list) { + get_task_struct(tsk); + if (tasklist) { + tsk->posix_timer_list = tasklist; + } else { + /* + * The list is terminated by a self-pointing + * task_struct + */ + tsk->posix_timer_list = tsk; + } + per_cpu(posix_timer_tasklist, cpu) = tsk; + } + /* XXX signal the thread somehow */ + wake_up_process(per_cpu(posix_timer_task,cpu)); +} + +/* + * posix_cpu_thread_call - callback that gets triggered when a CPU is added. + * Here we can start up the necessary migration thread for the new CPU. + */ +static int posix_cpu_thread_call(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + int cpu = (long)hcpu; + struct task_struct *p; + struct sched_param param; + + switch (action) { + case CPU_UP_PREPARE: + p = kthread_create(posix_cpu_timers_thread, hcpu, + "posix_cpu_timers/%d",cpu); + if (IS_ERR(p)) + return NOTIFY_BAD; + p->flags |= PF_NOFREEZE; + kthread_bind(p, cpu); + /* Must be high prio to avoid getting starved */ + param.sched_priority = MAX_RT_PRIO-1; + sched_setscheduler(p, SCHED_FIFO, ¶m); + per_cpu(posix_timer_task,cpu) = p; + break; + case CPU_ONLINE: + /* Strictly unneccessary, as first user will wake it. */ + wake_up_process(per_cpu(posix_timer_task,cpu)); + break; +#ifdef CONFIG_HOTPLUG_CPU + case CPU_UP_CANCELED: + /* Unbind it from offline cpu so it can run. Fall thru. 
*/ + kthread_bind(per_cpu(posix_timer_task,cpu), + any_online_cpu(cpu_online_map)); + kthread_stop(per_cpu(posix_timer_task,cpu)); + per_cpu(posix_timer_task,cpu) = NULL; + break; + case CPU_DEAD: + kthread_stop(per_cpu(posix_timer_task,cpu)); + per_cpu(posix_timer_task,cpu) = NULL; + break; +#endif + } + return NOTIFY_OK; +} + +/* Register at highest priority so that task migration (migrate_all_tasks) + * happens before everything else. + */ +static struct notifier_block __devinitdata posix_cpu_thread_notifier = { + .notifier_call = posix_cpu_thread_call, + .priority = 10 +}; + +static int __init posix_cpu_thread_init(void) +{ + void *cpu = (void *)(long)smp_processor_id(); + /* Start one for boot CPU. */ + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, cpu); + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, cpu); + register_cpu_notifier(&posix_cpu_thread_notifier); + return 0; +} +early_initcall(posix_cpu_thread_init); + /* * Set one of the process-wide special case CPU timers. * The tsk->sighand->siglock must be held by the caller. @@ -1700,6 +1855,12 @@ static __init int init_posix_cpu_timers( .nsleep = thread_cpu_nsleep, .nsleep_restart = thread_cpu_nsleep_restart, }; + unsigned long cpu; + + /* init the per-cpu posix_timer_tasklets */ + for_each_cpu_mask(cpu, cpu_possible_map) { + per_cpu(posix_timer_tasklist, cpu) = NULL; + } register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process); register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread); patches/cputimer-thread-rt-fix.patch0000664000076400007640000000127711150327144016567 0ustar tglxtglxSubject: cputimer: thread rt fix From: Ingo Molnar Date: Wed Feb 04 00:02:58 CET 2009 Signed-off-by: Ingo Molnar --- kernel/posix-cpu-timers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/kernel/posix-cpu-timers.c =================================================================== --- linux-2.6-tip.orig/kernel/posix-cpu-timers.c +++ linux-2.6-tip/kernel/posix-cpu-timers.c @@ -1523,7 +1523,7 @@ void run_posix_cpu_timers(struct task_st per_cpu(posix_timer_tasklist, cpu) = tsk; } /* XXX signal the thread somehow */ - wake_up_process(per_cpu(posix_timer_task,cpu)); + wake_up_process(per_cpu(posix_timer_task, cpu)); } /* patches/shorten-posix-cpu-timers-name.patch0000664000076400007640000000202111150327144020075 0ustar tglxtglxSubject: rt: shorten posix_cpu_timers/ kernel thread names From: Arnaldo Carvalho de Melo Date: Wed, 13 Aug 2008 15:42:11 -0300 Shorten the softirq kernel thread names because they always overflow the limited comm length, appearing as "posix_cpu_timer" CPU# times. Done on 2.6.24.7, but probably applicable to later kernels. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Ingo Molnar --- kernel/posix-cpu-timers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/kernel/posix-cpu-timers.c =================================================================== --- linux-2.6-tip.orig/kernel/posix-cpu-timers.c +++ linux-2.6-tip/kernel/posix-cpu-timers.c @@ -1540,7 +1540,7 @@ static int posix_cpu_thread_call(struct switch (action) { case CPU_UP_PREPARE: p = kthread_create(posix_cpu_timers_thread, hcpu, - "posix_cpu_timers/%d",cpu); + "posixcputmr/%d",cpu); if (IS_ERR(p)) return NOTIFY_BAD; p->flags |= PF_NOFREEZE; patches/vortex-fix.patch0000664000076400007640000000530211150327144014367 0ustar tglxtglxSubject: patches/vortex-fix.patch Argh, cut and paste wasn't enough... Use this patch instead. It needs an irq disable. 
But, believe it or not, on SMP this is actually better. If the irq is shared (as it is in Mark's case), we don't stop the irq of other devices from being handled on another CPU (unfortunately for Mark, he pinned all interrupts to one CPU). Andrew, should this be changed in mainline too? -- Steve Signed-off-by: Steven Rostedt drivers/net/3c59x.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) Signed-off-by: Ingo Molnar Index: linux-2.6-tip/drivers/net/3c59x.c =================================================================== --- linux-2.6-tip.orig/drivers/net/3c59x.c +++ linux-2.6-tip/drivers/net/3c59x.c @@ -791,9 +791,9 @@ static void poll_vortex(struct net_devic { struct vortex_private *vp = netdev_priv(dev); unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); } #endif @@ -1739,6 +1739,7 @@ vortex_timer(unsigned long data) int next_tick = 60*HZ; int ok = 0; int media_status, old_window; + unsigned long flags; if (vortex_debug > 2) { printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n", @@ -1746,7 +1747,7 @@ vortex_timer(unsigned long data) printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo); } - disable_irq_lockdep(dev->irq); + spin_lock_irqsave(&vp->lock, flags); old_window = ioread16(ioaddr + EL3_CMD) >> 13; EL3WINDOW(4); media_status = ioread16(ioaddr + Wn4_Media); @@ -1769,10 +1770,7 @@ vortex_timer(unsigned long data) case XCVR_MII: case XCVR_NWAY: { ok = 1; - /* Interrupts are already disabled */ - spin_lock(&vp->lock); vortex_check_media(dev, 0); - spin_unlock(&vp->lock); } break; default: /* Other media types handled by Tx timeouts. 
*/ @@ -1828,7 +1826,7 @@ leave_media_alone: dev->name, media_tbl[dev->if_port].name); EL3WINDOW(old_window); - enable_irq_lockdep(dev->irq); + spin_unlock_irqrestore(&vp->lock, flags); mod_timer(&vp->timer, RUN_AT(next_tick)); if (vp->deferred) iowrite16(FakeIntr, ioaddr + EL3_CMD); @@ -1862,12 +1860,12 @@ static void vortex_tx_timeout(struct net * Block interrupts because vortex_interrupt does a bare spin_lock() */ unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); if (vp->full_bus_master_tx) boomerang_interrupt(dev->irq, dev); else vortex_interrupt(dev->irq, dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); } } patches/serial-locking-rt-cleanup.patch0000664000076400007640000000231411150327750017232 0ustar tglxtglxSubject: patches/serial-locking-rt-cleanup.patch Signed-off-by: Ingo Molnar --- drivers/serial/8250.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) Index: linux-2.6-tip/drivers/serial/8250.c =================================================================== --- linux-2.6-tip.orig/drivers/serial/8250.c +++ linux-2.6-tip/drivers/serial/8250.c @@ -2707,14 +2707,10 @@ serial8250_console_write(struct console touch_nmi_watchdog(); - local_irq_save(flags); - if (up->port.sysrq) { - /* serial8250_handle_port() already took the lock */ - locked = 0; - } else if (oops_in_progress) { - locked = spin_trylock(&up->port.lock); - } else - spin_lock(&up->port.lock); + if (up->port.sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); + else + spin_lock_irqsave(&up->port.lock, flags); /* * First save the IER then disable the interrupts @@ -2746,8 +2742,7 @@ serial8250_console_write(struct console check_modem_status(up); if (locked) - spin_unlock(&up->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&up->port.lock, flags); } static int __init serial8250_console_setup(struct console *co, char *options) patches/serial-slow-machines.patch0000664000076400007640000000344711150327144016312 0ustar tglxtglxSubject: serial: slow machines From: Ingo Molnar Date: Wed Feb 04 00:02:57 CET 2009 Signed-off-by: Ingo Molnar --- drivers/char/tty_buffer.c | 4 ++++ drivers/serial/8250.c | 11 ++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) Index: linux-2.6-tip/drivers/char/tty_buffer.c =================================================================== --- linux-2.6-tip.orig/drivers/char/tty_buffer.c +++ linux-2.6-tip/drivers/char/tty_buffer.c @@ -482,10 +482,14 @@ void tty_flip_buffer_push(struct tty_str tty->buf.tail->commit = tty->buf.tail->used; spin_unlock_irqrestore(&tty->buf.lock, flags); +#ifndef CONFIG_PREEMPT_RT if (tty->low_latency) flush_to_ldisc(&tty->buf.work.work); else schedule_delayed_work(&tty->buf.work, 1); +#else + flush_to_ldisc(&tty->buf.work.work); +#endif } EXPORT_SYMBOL(tty_flip_buffer_push); Index: linux-2.6-tip/drivers/serial/8250.c =================================================================== --- linux-2.6-tip.orig/drivers/serial/8250.c +++ linux-2.6-tip/drivers/serial/8250.c @@ -1546,7 +1546,10 @@ static irqreturn_t serial8250_interrupt( { struct irq_info *i = dev_id; struct list_head *l, *end = NULL; - int pass_counter = 0, handled = 0; +#ifndef CONFIG_PREEMPT_RT + int pass_counter = 0; +#endif + int handled = 0; DEBUG_INTR("serial8250_interrupt(%d)...", irq); @@ -1584,12 +1587,18 @@ static irqreturn_t serial8250_interrupt( l = l->next; + /* + * On preempt-rt we can be preempted and run in our + * own thread. 
+ */ +#ifndef CONFIG_PREEMPT_RT if (l == i->head && pass_counter++ > PASS_LIMIT) { /* If we hit this, we're dead. */ printk(KERN_ERR "serial8250: too much work for " "irq%d\n", irq); break; } +#endif } while (l != end); spin_unlock(&i->lock); patches/preempt-realtime-x86_64.patch0000664000076400007640000001742411155205036016474 0ustar tglxtglxSubject: patches/preempt-realtime-x86_64.patch Signed-off-by: Ingo Molnar --- arch/x86/include/asm/acpi.h | 4 ++-- arch/x86/include/asm/i8259.h | 2 +- arch/x86/include/asm/spinlock.h | 6 +++--- arch/x86/include/asm/tlbflush.h | 2 ++ arch/x86/include/asm/vgtod.h | 2 +- arch/x86/kernel/apic/io_apic.c | 4 ++-- arch/x86/kernel/apic/nmi.c | 2 ++ arch/x86/kernel/early_printk.c | 2 +- arch/x86/kernel/head64.c | 6 +++++- arch/x86/kernel/i8259.c | 2 +- arch/x86/kernel/process_64.c | 6 ++++-- arch/x86/kernel/signal.c | 7 +++++++ arch/x86/kernel/smp.c | 10 ++++++++++ arch/x86/mm/tlb.c | 2 +- 14 files changed, 42 insertions(+), 15 deletions(-) Index: linux-2.6-tip/arch/x86/include/asm/acpi.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/acpi.h +++ linux-2.6-tip/arch/x86/include/asm/acpi.h @@ -50,8 +50,8 @@ #define ACPI_ASM_MACROS #define BREAKPOINT3 -#define ACPI_DISABLE_IRQS() local_irq_disable() -#define ACPI_ENABLE_IRQS() local_irq_enable() +#define ACPI_DISABLE_IRQS() local_irq_disable_nort() +#define ACPI_ENABLE_IRQS() local_irq_enable_nort() #define ACPI_FLUSH_CPU_CACHE() wbinvd() int __acpi_acquire_global_lock(unsigned int *lock); Index: linux-2.6-tip/arch/x86/include/asm/i8259.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/i8259.h +++ linux-2.6-tip/arch/x86/include/asm/i8259.h @@ -24,7 +24,7 @@ extern unsigned int cached_irq_mask; #define SLAVE_ICW4_DEFAULT 0x01 #define PIC_ICW4_AEOI 2 -extern spinlock_t i8259A_lock; +extern raw_spinlock_t i8259A_lock; extern void init_8259A(int auto_eoi); extern void enable_8259A_irq(unsigned int irq); Index: linux-2.6-tip/arch/x86/include/asm/spinlock.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/spinlock.h +++ linux-2.6-tip/arch/x86/include/asm/spinlock.h @@ -295,8 +295,8 @@ static inline void __raw_write_unlock(__ : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); } -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define __raw_spin_relax(lock) cpu_relax() +#define __raw_read_relax(lock) cpu_relax() +#define __raw_write_relax(lock) cpu_relax() #endif /* _ASM_X86_SPINLOCK_H */ Index: linux-2.6-tip/arch/x86/include/asm/tlbflush.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/tlbflush.h +++ linux-2.6-tip/arch/x86/include/asm/tlbflush.h @@ -17,7 +17,9 @@ static inline void __native_flush_tlb(void) { + preempt_disable(); write_cr3(read_cr3()); + preempt_enable(); } static inline void __native_flush_tlb_global(void) Index: linux-2.6-tip/arch/x86/include/asm/vgtod.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/vgtod.h +++ linux-2.6-tip/arch/x86/include/asm/vgtod.h @@ -5,7 +5,7 @@ #include struct vsyscall_gtod_data { - seqlock_t lock; + raw_seqlock_t lock; /* open coded 'struct timespec' */ time_t wall_time_sec; Index: linux-2.6-tip/arch/x86/kernel/apic/io_apic.c 
=================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/apic/io_apic.c +++ linux-2.6-tip/arch/x86/kernel/apic/io_apic.c @@ -72,8 +72,8 @@ */ int sis_apic_bug = -1; -static DEFINE_SPINLOCK(ioapic_lock); -static DEFINE_SPINLOCK(vector_lock); +static DEFINE_RAW_SPINLOCK(ioapic_lock); +static DEFINE_RAW_SPINLOCK(vector_lock); /* * # of IRQ routing registers Index: linux-2.6-tip/arch/x86/kernel/apic/nmi.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/apic/nmi.c +++ linux-2.6-tip/arch/x86/kernel/apic/nmi.c @@ -90,7 +90,9 @@ static inline unsigned int get_timer_irq */ static __init void nmi_cpu_busy(void *data) { +#ifndef CONFIG_PREEMPT_RT local_irq_enable_in_hardirq(); +#endif /* * Intentionally don't use cpu_relax here. This is * to make sure that the performance counter really ticks, Index: linux-2.6-tip/arch/x86/kernel/early_printk.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/early_printk.c +++ linux-2.6-tip/arch/x86/kernel/early_printk.c @@ -881,7 +881,7 @@ static int __initdata early_console_init asmlinkage void early_printk(const char *fmt, ...) { - char buf[512]; + static char buf[512]; int n; va_list ap; Index: linux-2.6-tip/arch/x86/kernel/head64.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/head64.c +++ linux-2.6-tip/arch/x86/kernel/head64.c @@ -30,7 +30,11 @@ static void __init zap_identity_mappings { pgd_t *pgd = pgd_offset_k(0UL); pgd_clear(pgd); - __flush_tlb_all(); + /* + * preempt_disable/enable does not work this early in the + * bootup yet: + */ + write_cr3(read_cr3()); } /* Don't add a printk in there. printk relies on the PDA which is not initialized Index: linux-2.6-tip/arch/x86/kernel/i8259.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/i8259.c +++ linux-2.6-tip/arch/x86/kernel/i8259.c @@ -32,8 +32,8 @@ */ static int i8259A_auto_eoi; -DEFINE_SPINLOCK(i8259A_lock); static void mask_and_ack_8259A(unsigned int); +DEFINE_RAW_SPINLOCK(i8259A_lock); struct irq_chip i8259A_chip = { .name = "XT-PIC", Index: linux-2.6-tip/arch/x86/kernel/process_64.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/process_64.c +++ linux-2.6-tip/arch/x86/kernel/process_64.c @@ -155,9 +155,11 @@ void cpu_idle(void) } tick_nohz_restart_sched_tick(); - preempt_enable_no_resched(); - schedule(); + local_irq_disable(); + __preempt_enable_no_resched(); + __schedule(); preempt_disable(); + local_irq_enable(); } } Index: linux-2.6-tip/arch/x86/kernel/signal.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/signal.c +++ linux-2.6-tip/arch/x86/kernel/signal.c @@ -777,6 +777,13 @@ static void do_signal(struct pt_regs *re int signr; sigset_t *oldset; +#ifdef CONFIG_PREEMPT_RT + /* + * Fully-preemptible kernel does not need interrupts disabled: + */ + local_irq_enable(); + preempt_check_resched(); +#endif /* * We want the common case to go fast, which is why we may in certain * cases get here from kernel mode. 
Just return without doing anything Index: linux-2.6-tip/arch/x86/kernel/smp.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/smp.c +++ linux-2.6-tip/arch/x86/kernel/smp.c @@ -120,6 +120,16 @@ static void native_smp_send_reschedule(i apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); } +/* + * this function sends a 'reschedule' IPI to all other CPUs. + * This is used when RT tasks are starving and other CPUs + * might be able to run them: + */ +void smp_send_reschedule_allbutself(void) +{ + apic->send_IPI_allbutself(RESCHEDULE_VECTOR); +} + void native_send_call_func_single_ipi(int cpu) { apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR); Index: linux-2.6-tip/arch/x86/mm/tlb.c =================================================================== --- linux-2.6-tip.orig/arch/x86/mm/tlb.c +++ linux-2.6-tip/arch/x86/mm/tlb.c @@ -40,8 +40,8 @@ union smp_flush_state { struct { struct mm_struct *flush_mm; unsigned long flush_va; - spinlock_t tlbstate_lock; DECLARE_BITMAP(flush_cpumask, NR_CPUS); + raw_spinlock_t tlbstate_lock; }; char pad[CONFIG_X86_INTERNODE_CACHE_BYTES]; } ____cacheline_internodealigned_in_smp; patches/preempt-realtime-i386.patch0000664000076400007640000004102211160527545016226 0ustar tglxtglxSubject: preempt: realtime i386 From: Ingo Molnar Date: Wed Feb 04 00:02:56 CET 2009 Signed-off-by: Ingo Molnar --- arch/Kconfig | 5 ++++ arch/x86/Kconfig.debug | 1 arch/x86/include/asm/highmem.h | 27 ++++++++++++++++++++++++++ arch/x86/include/asm/i8253.h | 2 - arch/x86/include/asm/pci_x86.h | 2 - arch/x86/include/asm/tlbflush.h | 22 +++++++++++++++++++++ arch/x86/include/asm/xor_32.h | 19 ++++++++++++++++-- arch/x86/kernel/cpu/mtrr/generic.c | 2 - arch/x86/kernel/dumpstack_32.c | 6 +++++ arch/x86/kernel/head_32.S | 1 arch/x86/kernel/i8253.c | 2 - arch/x86/kernel/microcode_amd.c | 2 - arch/x86/kernel/microcode_intel.c | 2 - arch/x86/kernel/process_32.c | 6 +++-- arch/x86/kernel/vm86_32.c | 1 arch/x86/mm/fault.c | 1 arch/x86/mm/highmem_32.c | 38 +++++++++++++++++++++++++++++-------- arch/x86/pci/common.c | 2 - arch/x86/pci/direct.c | 29 ++++++++++++++++++---------- 19 files changed, 141 insertions(+), 29 deletions(-) Index: linux-2.6-tip/arch/Kconfig =================================================================== --- linux-2.6-tip.orig/arch/Kconfig +++ linux-2.6-tip/arch/Kconfig @@ -33,6 +33,11 @@ config OPROFILE_IBS config HAVE_OPROFILE bool +config PROFILE_NMI + bool + depends on OPROFILE + default y + config KPROBES bool "Kprobes" depends on KALLSYMS && MODULES Index: linux-2.6-tip/arch/x86/Kconfig.debug =================================================================== --- linux-2.6-tip.orig/arch/x86/Kconfig.debug +++ linux-2.6-tip/arch/x86/Kconfig.debug @@ -135,6 +135,7 @@ config DEBUG_NX_TEST config 4KSTACKS bool "Use 4Kb for kernel stacks instead of 8Kb" depends on X86_32 + default y ---help--- If you say Y here the kernel will use a 4Kb stacksize for the kernel stack attached to each process/thread. 
This facilitates Index: linux-2.6-tip/arch/x86/include/asm/highmem.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/highmem.h +++ linux-2.6-tip/arch/x86/include/asm/highmem.h @@ -58,6 +58,16 @@ extern void *kmap_high(struct page *page extern void kunmap_high(struct page *page); void *kmap(struct page *page); +extern void kunmap_virt(void *ptr); +extern struct page *kmap_to_page(void *ptr); +void kunmap(struct page *page); + +void *__kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); +void *__kmap_atomic(struct page *page, enum km_type type); +void __kunmap_atomic(void *kvaddr, enum km_type type); +void *__kmap_atomic_pfn(unsigned long pfn, enum km_type type); +struct page *__kmap_atomic_to_page(void *ptr); + void kunmap(struct page *page); void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); void *kmap_atomic(struct page *page, enum km_type type); @@ -75,6 +85,23 @@ struct page *kmap_atomic_to_page(void *p extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn, unsigned long end_pfn); +/* + * on PREEMPT_RT kmap_atomic() is a wrapper that uses kmap(): + */ +#ifdef CONFIG_PREEMPT_RT +# define kmap_atomic_prot(page, type, prot) kmap(page) +# define kmap_atomic(page, type) kmap(page) +# define kmap_atomic_pfn(pfn, type) kmap(pfn_to_page(pfn)) +# define kunmap_atomic(kvaddr, type) kunmap_virt(kvaddr) +# define kmap_atomic_to_page(kvaddr) kmap_to_page(kvaddr) +#else +# define kmap_atomic_prot(page, type, prot) __kmap_atomic_prot(page, type, prot) +# define kmap_atomic(page, type) __kmap_atomic(page, type) +# define kmap_atomic_pfn(pfn, type) __kmap_atomic_pfn(pfn, type) +# define kunmap_atomic(kvaddr, type) __kunmap_atomic(kvaddr, type) +# define kmap_atomic_to_page(kvaddr) __kmap_atomic_to_page(kvaddr) +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_X86_HIGHMEM_H */ Index: linux-2.6-tip/arch/x86/include/asm/i8253.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/i8253.h +++ linux-2.6-tip/arch/x86/include/asm/i8253.h @@ -6,7 +6,7 @@ #define PIT_CH0 0x40 #define PIT_CH2 0x42 -extern spinlock_t i8253_lock; +extern raw_spinlock_t i8253_lock; extern struct clock_event_device *global_clock_event; Index: linux-2.6-tip/arch/x86/include/asm/pci_x86.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/pci_x86.h +++ linux-2.6-tip/arch/x86/include/asm/pci_x86.h @@ -83,7 +83,7 @@ struct irq_routing_table { extern unsigned int pcibios_irq_mask; extern int pcibios_scanned; -extern spinlock_t pci_config_lock; +extern raw_spinlock_t pci_config_lock; extern int (*pcibios_enable_irq)(struct pci_dev *dev); extern void (*pcibios_disable_irq)(struct pci_dev *dev); Index: linux-2.6-tip/arch/x86/include/asm/tlbflush.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/tlbflush.h +++ linux-2.6-tip/arch/x86/include/asm/tlbflush.h @@ -7,6 +7,21 @@ #include #include +/* + * TLB-flush needs to be nonpreemptible on PREEMPT_RT due to the + * following complex race scenario: + * + * if the current task is lazy-TLB and does a TLB flush and + * gets preempted after the movl %%r3, %0 but before the + * movl %0, %%cr3 then its ->active_mm might change and it will + * install the wrong cr3 when it switches back. 
This is not a + * problem for the lazy-TLB task itself, but if the next task it + * switches to has an ->mm that is also the lazy-TLB task's + * new ->active_mm, then the scheduler will assume that cr3 is + * the new one, while we overwrote it with the old one. The result + * is the wrong cr3 in the new (non-lazy-TLB) task, which typically + * causes an infinite pagefault upon the next userspace access. + */ #ifdef CONFIG_PARAVIRT #include #else @@ -97,6 +112,13 @@ static inline void __flush_tlb_one(unsig static inline void flush_tlb_mm(struct mm_struct *mm) { + /* + * This is safe on PREEMPT_RT because if we preempt + * right after the check but before the __flush_tlb(), + * and if ->active_mm changes, then we might miss a + * TLB flush, but that TLB flush happened already when + * ->active_mm was changed: + */ if (mm == current->active_mm) __flush_tlb(); } Index: linux-2.6-tip/arch/x86/include/asm/xor_32.h =================================================================== --- linux-2.6-tip.orig/arch/x86/include/asm/xor_32.h +++ linux-2.6-tip/arch/x86/include/asm/xor_32.h @@ -865,7 +865,21 @@ static struct xor_block_template xor_blo #include #undef XOR_TRY_TEMPLATES -#define XOR_TRY_TEMPLATES \ +/* + * MMX/SSE ops disable preemption for long periods of time, + * so on PREEMPT_RT use the register-based ops only: + */ +#ifdef CONFIG_PREEMPT_RT +# define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_8regs_p); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_32regs_p); \ + } while (0) +# define XOR_SELECT_TEMPLATE(FASTEST) (FASTEST) +#else +# define XOR_TRY_TEMPLATES \ do { \ xor_speed(&xor_block_8regs); \ xor_speed(&xor_block_8regs_p); \ @@ -882,7 +896,8 @@ do { \ /* We force the use of the SSE xor block because it can write around L2. We may also be able to load into the L1 only depending on how the cpu deals with a load to a line that is being prefetched. */ -#define XOR_SELECT_TEMPLATE(FASTEST) \ +# define XOR_SELECT_TEMPLATE(FASTEST) \ (cpu_has_xmm ? 
&xor_block_pIII_sse : FASTEST) +#endif /* CONFIG_PREEMPT_RT */ #endif /* _ASM_X86_XOR_32_H */ Index: linux-2.6-tip/arch/x86/kernel/cpu/mtrr/generic.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/cpu/mtrr/generic.c +++ linux-2.6-tip/arch/x86/kernel/cpu/mtrr/generic.c @@ -548,7 +548,7 @@ static unsigned long set_mtrr_state(void static unsigned long cr4 = 0; -static DEFINE_SPINLOCK(set_atomicity_lock); +static DEFINE_RAW_SPINLOCK(set_atomicity_lock); /* * Since we are disabling the cache don't allow any interrupts - they Index: linux-2.6-tip/arch/x86/kernel/dumpstack_32.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/dumpstack_32.c +++ linux-2.6-tip/arch/x86/kernel/dumpstack_32.c @@ -93,6 +93,12 @@ show_stack_log_lvl(struct task_struct *t } +#if defined(CONFIG_DEBUG_STACKOVERFLOW) && defined(CONFIG_EVENT_TRACE) +extern unsigned long worst_stack_left; +#else +# define worst_stack_left -1L +#endif + void show_registers(struct pt_regs *regs) { int i; Index: linux-2.6-tip/arch/x86/kernel/head_32.S =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/head_32.S +++ linux-2.6-tip/arch/x86/kernel/head_32.S @@ -595,6 +595,7 @@ ignore_int: call dump_stack addl $(5*4),%esp + call dump_stack popl %ds popl %es popl %edx Index: linux-2.6-tip/arch/x86/kernel/i8253.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/i8253.c +++ linux-2.6-tip/arch/x86/kernel/i8253.c @@ -15,7 +15,7 @@ #include #include -DEFINE_SPINLOCK(i8253_lock); +DEFINE_RAW_SPINLOCK(i8253_lock); EXPORT_SYMBOL(i8253_lock); #ifdef CONFIG_X86_32 Index: linux-2.6-tip/arch/x86/kernel/microcode_amd.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/microcode_amd.c +++ linux-2.6-tip/arch/x86/kernel/microcode_amd.c @@ -80,7 +80,7 @@ struct microcode_amd { #define UCODE_CONTAINER_HEADER_SIZE 12 /* serialize access to the physical write */ -static DEFINE_SPINLOCK(microcode_update_lock); +static DEFINE_RAW_SPINLOCK(microcode_update_lock); static struct equiv_cpu_entry *equiv_cpu_table; Index: linux-2.6-tip/arch/x86/kernel/microcode_intel.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/microcode_intel.c +++ linux-2.6-tip/arch/x86/kernel/microcode_intel.c @@ -151,7 +151,7 @@ struct extended_sigtable { #define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE) /* serialize access to the physical write to MSR 0x79 */ -static DEFINE_SPINLOCK(microcode_update_lock); +static DEFINE_RAW_SPINLOCK(microcode_update_lock); static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) { Index: linux-2.6-tip/arch/x86/kernel/process_32.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/process_32.c +++ linux-2.6-tip/arch/x86/kernel/process_32.c @@ -165,8 +165,10 @@ void __show_regs(struct pt_regs *regs, i regs->ax, regs->bx, regs->cx, regs->dx); printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", regs->si, regs->di, regs->bp, sp); - printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", - (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss); + printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x" + " preempt:%08x\n", + (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, + preempt_count()); if (!all) return; Index: 
linux-2.6-tip/arch/x86/kernel/vm86_32.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/vm86_32.c +++ linux-2.6-tip/arch/x86/kernel/vm86_32.c @@ -137,6 +137,7 @@ struct pt_regs *save_v86_state(struct ke local_irq_enable(); if (!current->thread.vm86_info) { + local_irq_disable(); printk("no vm86_info: BAD\n"); do_exit(SIGSEGV); } Index: linux-2.6-tip/arch/x86/mm/fault.c =================================================================== --- linux-2.6-tip.orig/arch/x86/mm/fault.c +++ linux-2.6-tip/arch/x86/mm/fault.c @@ -591,6 +591,7 @@ static int is_f00f_bug(struct pt_regs *r nr = (address - idt_descr.address) >> 3; if (nr == 6) { + zap_rt_locks(); do_invalid_op(regs, 0); return 1; } Index: linux-2.6-tip/arch/x86/mm/highmem_32.c =================================================================== --- linux-2.6-tip.orig/arch/x86/mm/highmem_32.c +++ linux-2.6-tip/arch/x86/mm/highmem_32.c @@ -19,6 +19,27 @@ void kunmap(struct page *page) kunmap_high(page); } +void kunmap_virt(void *ptr) +{ + struct page *page; + + if ((unsigned long)ptr < PKMAP_ADDR(0)) + return; + page = pte_page(pkmap_page_table[PKMAP_NR((unsigned long)ptr)]); + kunmap(page); +} + +struct page *kmap_to_page(void *ptr) +{ + struct page *page; + + if ((unsigned long)ptr < PKMAP_ADDR(0)) + return virt_to_page(ptr); + page = pte_page(pkmap_page_table[PKMAP_NR((unsigned long)ptr)]); + return page; +} +EXPORT_SYMBOL_GPL(kmap_to_page); /* PREEMPT_RT converts some modules to use this */ + static void debug_kmap_atomic_prot(enum km_type type) { #ifdef CONFIG_DEBUG_HIGHMEM @@ -70,7 +91,7 @@ static void debug_kmap_atomic_prot(enum * However when holding an atomic kmap is is not legal to sleep, so atomic * kmaps are appropriate for short, tight code paths only. */ -void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) +void *__kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) { enum fixed_addresses idx; unsigned long vaddr; @@ -92,12 +113,12 @@ void *kmap_atomic_prot(struct page *page return (void *)vaddr; } -void *kmap_atomic(struct page *page, enum km_type type) +void *__kmap_atomic(struct page *page, enum km_type type) { return kmap_atomic_prot(page, type, kmap_prot); } -void kunmap_atomic(void *kvaddr, enum km_type type) +void __kunmap_atomic(void *kvaddr, enum km_type type) { unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); @@ -125,13 +146,13 @@ void kunmap_atomic(void *kvaddr, enum km * This is the same as kmap_atomic() but can map memory that doesn't * have a struct page associated with it. 
*/ -void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) +void *__kmap_atomic_pfn(unsigned long pfn, enum km_type type) { return kmap_atomic_prot_pfn(pfn, type, kmap_prot); } -EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */ +EXPORT_SYMBOL_GPL(__kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */ -struct page *kmap_atomic_to_page(void *ptr) +struct page *__kmap_atomic_to_page(void *ptr) { unsigned long idx, vaddr = (unsigned long)ptr; pte_t *pte; @@ -146,8 +167,9 @@ struct page *kmap_atomic_to_page(void *p EXPORT_SYMBOL(kmap); EXPORT_SYMBOL(kunmap); -EXPORT_SYMBOL(kmap_atomic); -EXPORT_SYMBOL(kunmap_atomic); +EXPORT_SYMBOL(kunmap_virt); +EXPORT_SYMBOL(__kmap_atomic); +EXPORT_SYMBOL(__kunmap_atomic); void __init set_highmem_pages_init(void) { Index: linux-2.6-tip/arch/x86/pci/common.c =================================================================== --- linux-2.6-tip.orig/arch/x86/pci/common.c +++ linux-2.6-tip/arch/x86/pci/common.c @@ -81,7 +81,7 @@ int pcibios_scanned; * This interrupt-safe spinlock protects all accesses to PCI * configuration space. */ -DEFINE_SPINLOCK(pci_config_lock); +DEFINE_RAW_SPINLOCK(pci_config_lock); static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d) { Index: linux-2.6-tip/arch/x86/pci/direct.c =================================================================== --- linux-2.6-tip.orig/arch/x86/pci/direct.c +++ linux-2.6-tip/arch/x86/pci/direct.c @@ -223,16 +223,23 @@ static int __init pci_check_type1(void) unsigned int tmp; int works = 0; - local_irq_save(flags); + spin_lock_irqsave(&pci_config_lock, flags); outb(0x01, 0xCFB); tmp = inl(0xCF8); outl(0x80000000, 0xCF8); - if (inl(0xCF8) == 0x80000000 && pci_sanity_check(&pci_direct_conf1)) { - works = 1; + + if (inl(0xCF8) == 0x80000000) { + spin_unlock_irqrestore(&pci_config_lock, flags); + + if (pci_sanity_check(&pci_direct_conf1)) + works = 1; + + spin_lock_irqsave(&pci_config_lock, flags); } outl(tmp, 0xCF8); - local_irq_restore(flags); + + spin_unlock_irqrestore(&pci_config_lock, flags); return works; } @@ -242,17 +249,19 @@ static int __init pci_check_type2(void) unsigned long flags; int works = 0; - local_irq_save(flags); + spin_lock_irqsave(&pci_config_lock, flags); outb(0x00, 0xCFB); outb(0x00, 0xCF8); outb(0x00, 0xCFA); - if (inb(0xCF8) == 0x00 && inb(0xCFA) == 0x00 && - pci_sanity_check(&pci_direct_conf2)) { - works = 1; - } - local_irq_restore(flags); + if (inb(0xCF8) == 0x00 && inb(0xCFA) == 0x00) { + spin_unlock_irqrestore(&pci_config_lock, flags); + + if (pci_sanity_check(&pci_direct_conf2)) + works = 1; + } else + spin_unlock_irqrestore(&pci_config_lock, flags); return works; } patches/remove-check-pgt-cache-calls.patch0000664000076400007640000000114511160527544017561 0ustar tglxtglxSubject: remove: check pgt cache calls From: Ingo Molnar Date: Wed Feb 04 00:02:56 CET 2009 Signed-off-by: Ingo Molnar --- arch/x86/kernel/process_32.c | 1 - 1 file changed, 1 deletion(-) Index: linux-2.6-tip/arch/x86/kernel/process_32.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/process_32.c +++ linux-2.6-tip/arch/x86/kernel/process_32.c @@ -108,7 +108,6 @@ void cpu_idle(void) tick_nohz_stop_sched_tick(1); while (!need_resched()) { - check_pgt_cache(); rmb(); if (cpu_is_offline(cpu)) patches/preempt-realtime-sched.patch0000664000076400007640000006573411160544604016636 0ustar tglxtglxSubject: preempt: realtime sched From: Ingo Molnar Date: Wed Feb 04 00:02:55 CET 2009 
Signed-off-by: Ingo Molnar --- include/linux/hardirq.h | 13 - include/linux/sched.h | 47 +++++ kernel/mutex.c | 6 kernel/sched.c | 387 +++++++++++++++++++++++++++++++++++++++--------- kernel/sched_rt.c | 54 ++++++ lib/kernel_lock.c | 5 6 files changed, 424 insertions(+), 88 deletions(-) Index: linux-2.6-tip/include/linux/hardirq.h =================================================================== --- linux-2.6-tip.orig/include/linux/hardirq.h +++ linux-2.6-tip/include/linux/hardirq.h @@ -94,19 +94,6 @@ #define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) #ifdef CONFIG_PREEMPT -# define PREEMPT_CHECK_OFFSET 1 -#else -# define PREEMPT_CHECK_OFFSET 0 -#endif - -/* - * Check whether we were atomic before we did preempt_disable(): - * (used by the scheduler) - */ -#define in_atomic_preempt_off() \ - ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET) - -#ifdef CONFIG_PREEMPT # define preemptible() (preempt_count() == 0 && !irqs_disabled()) # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1) #else Index: linux-2.6-tip/include/linux/sched.h =================================================================== --- linux-2.6-tip.orig/include/linux/sched.h +++ linux-2.6-tip/include/linux/sched.h @@ -92,6 +92,16 @@ struct sched_param { #include +#ifdef CONFIG_PREEMPT +extern int kernel_preemption; +#else +# define kernel_preemption 0 +#endif +#ifdef CONFIG_PREEMPT_VOLUNTARY +extern int voluntary_preemption; +#else +# define voluntary_preemption 0 +#endif #ifdef CONFIG_PREEMPT_SOFTIRQS extern int softirq_preemption; #else @@ -230,6 +240,28 @@ extern struct semaphore kernel_sem; #define set_task_state(tsk, state_value) \ set_mb((tsk)->state, (state_value)) +// #define PREEMPT_DIRECT + +#ifdef CONFIG_X86_LOCAL_APIC +extern void nmi_show_all_regs(void); +#else +# define nmi_show_all_regs() do { } while (0) +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct exec_domain; + /* * set_current_state() includes a barrier so that the write of current->state * is correctly serialised wrt the caller's subsequent test of whether to @@ -364,6 +396,11 @@ extern signed long schedule_timeout_unin asmlinkage void __schedule(void); asmlinkage void schedule(void); extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); +/* + * This one can be called with interrupts disabled, only + * to be used by lowlevel arch code! 
+ */ +asmlinkage void __sched __schedule(void); struct nsproxy; struct user_namespace; @@ -1646,6 +1683,15 @@ extern struct pid *cad_pid; extern void free_task(struct task_struct *tsk); #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) +#ifdef CONFIG_PREEMPT_RT +extern void __put_task_struct_cb(struct rcu_head *rhp); + +static inline void put_task_struct(struct task_struct *t) +{ + if (atomic_dec_and_test(&t->usage)) + call_rcu(&t->rcu, __put_task_struct_cb); +} +#else extern void __put_task_struct(struct task_struct *t); static inline void put_task_struct(struct task_struct *t) @@ -1653,6 +1699,7 @@ static inline void put_task_struct(struc if (atomic_dec_and_test(&t->usage)) __put_task_struct(t); } +#endif extern cputime_t task_utime(struct task_struct *p); extern cputime_t task_stime(struct task_struct *p); Index: linux-2.6-tip/kernel/mutex.c =================================================================== --- linux-2.6-tip.orig/kernel/mutex.c +++ linux-2.6-tip/kernel/mutex.c @@ -248,7 +248,13 @@ __mutex_lock_common(struct mutex *lock, /* didnt get the lock, go to sleep: */ spin_unlock_mutex(&lock->wait_lock, flags); + + local_irq_disable(); + __preempt_enable_no_resched(); __schedule(); + preempt_disable(); + local_irq_enable(); + spin_lock_mutex(&lock->wait_lock, flags); } Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -4,6 +4,7 @@ * Kernel scheduler and related syscalls * * Copyright (C) 1991-2002 Linus Torvalds + * Copyright (C) 2004 Red Hat, Inc., Ingo Molnar * * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and * make semaphores SMP safe @@ -16,6 +17,7 @@ * by Davide Libenzi, preemptible kernel bits by Robert Love. * 2003-09-03 Interactivity tuning by Con Kolivas. * 2004-04-02 Scheduler domains code by Nick Piggin + * 2004-10-13 Real-Time Preemption support by Ingo Molnar * 2007-04-15 Work begun on replacing all interactivity tuning with a * fair scheduling design by Con Kolivas. 
* 2007-05-05 Load balancing (smp-nice) and other improvements @@ -60,6 +62,7 @@ #include #include #include +#include #include #include #include @@ -105,6 +108,20 @@ #define NICE_0_LOAD SCHED_LOAD_SCALE #define NICE_0_SHIFT SCHED_LOAD_SHIFT +#if (BITS_PER_LONG < 64) +#define JIFFIES_TO_NS64(TIME) \ + ((unsigned long long)(TIME) * ((unsigned long) (1000000000 / HZ))) + +#define NS64_TO_JIFFIES(TIME) \ + ((((unsigned long long)((TIME)) >> BITS_PER_LONG) * \ + (1 + NS_TO_JIFFIES(~0UL))) + NS_TO_JIFFIES((unsigned long)(TIME))) +#else /* BITS_PER_LONG < 64 */ + +#define NS64_TO_JIFFIES(TIME) NS_TO_JIFFIES(TIME) +#define JIFFIES_TO_NS64(TIME) JIFFIES_TO_NS(TIME) + +#endif /* BITS_PER_LONG < 64 */ + /* * These are the 'tuning knobs' of the scheduler: * @@ -148,6 +165,32 @@ static inline void sg_inc_cpu_power(stru } #endif +#define TASK_PREEMPTS_CURR(p, rq) \ + ((p)->prio < (rq)->curr->prio) + +/* + * Tweaks for current + */ + +#ifdef CURRENT_PTR +struct task_struct * const ___current = &init_task; +struct task_struct ** const current_ptr = (struct task_struct ** const)&___current; +struct thread_info * const current_ti = &init_thread_union.thread_info; +struct thread_info ** const current_ti_ptr = (struct thread_info ** const)&current_ti; + +EXPORT_SYMBOL(___current); +EXPORT_SYMBOL(current_ti); + +/* + * The scheduler itself doesn't want 'current' to be cached + * during context-switches: + */ +# undef current +# define current __current() +# undef current_thread_info +# define current_thread_info() __current_thread_info() +#endif + static inline int rt_policy(int policy) { if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR)) @@ -170,7 +213,7 @@ struct rt_prio_array { struct rt_bandwidth { /* nests inside the rq lock: */ - spinlock_t rt_runtime_lock; + raw_spinlock_t rt_runtime_lock; ktime_t rt_period; u64 rt_runtime; struct hrtimer rt_period_timer; @@ -500,11 +543,12 @@ struct rt_rq { int overloaded; struct plist_head pushable_tasks; #endif + unsigned long rt_nr_uninterruptible; int rt_throttled; u64 rt_time; u64 rt_runtime; /* Nests inside the rq lock: */ - spinlock_t rt_runtime_lock; + raw_spinlock_t rt_runtime_lock; #ifdef CONFIG_RT_GROUP_SCHED unsigned long rt_nr_boosted; @@ -567,7 +611,7 @@ static struct root_domain def_root_domai */ struct rq { /* runqueue lock: */ - spinlock_t lock; + raw_spinlock_t lock; /* * nr_running and cpu_load should be in the same cacheline because @@ -605,6 +649,8 @@ struct rq { */ unsigned long nr_uninterruptible; + unsigned long switch_timestamp; + unsigned long slice_avg; struct task_struct *curr, *idle; unsigned long next_balance; struct mm_struct *prev_mm; @@ -662,6 +708,13 @@ struct rq { /* BKL stats */ unsigned int bkl_count; + + /* RT-overload stats: */ + unsigned long rto_schedule; + unsigned long rto_schedule_tail; + unsigned long rto_wakeup; + unsigned long rto_pulled; + unsigned long rto_pushed; #endif }; @@ -889,11 +942,23 @@ static inline u64 global_rt_runtime(void return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; } +/* + * We really don't want to do anything complex within switch_to() + * on PREEMPT_RT - this check enforces this.
+ */ +#ifdef prepare_arch_switch +# ifdef CONFIG_PREEMPT_RT +# error FIXME +# else +# define _finish_arch_switch finish_arch_switch +# endif +#endif + #ifndef prepare_arch_switch # define prepare_arch_switch(next) do { } while (0) #endif #ifndef finish_arch_switch -# define finish_arch_switch(prev) do { } while (0) +# define _finish_arch_switch(prev) do { } while (0) #endif static inline int task_current(struct rq *rq, struct task_struct *p) @@ -924,7 +989,7 @@ static inline void finish_lock_switch(st */ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); - spin_unlock_irq(&rq->lock); + spin_unlock(&rq->lock); } #else /* __ARCH_WANT_UNLOCKED_CTXSW */ @@ -965,8 +1030,8 @@ static inline void finish_lock_switch(st smp_wmb(); prev->oncpu = 0; #endif -#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW - local_irq_enable(); +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW + local_irq_disable(); #endif } #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ @@ -1840,6 +1905,8 @@ static inline int normal_prio(struct tas prio = MAX_RT_PRIO-1 - p->rt_priority; else prio = __normal_prio(p); + +// trace_special_pid(p->pid, PRIO(p), __PRIO(prio)); return prio; } @@ -2421,6 +2488,13 @@ try_to_wake_up(struct task_struct *p, un } #endif +#ifdef CONFIG_PREEMPT_RT + /* + * sync wakeups can increase wakeup latencies: + */ + if (rt_task(p)) + sync = 0; +#endif smp_wmb(); rq = task_rq_lock(p, &flags); update_rq_clock(rq); @@ -2504,7 +2578,10 @@ out_running: trace_sched_wakeup(rq, p, success); check_preempt_curr(rq, p, sync); - p->state = TASK_RUNNING; + if (mutex) + p->state = TASK_RUNNING_MUTEX; + else + p->state = TASK_RUNNING; #ifdef CONFIG_SMP if (p->sched_class->task_wake_up) p->sched_class->task_wake_up(rq, p); @@ -2785,7 +2862,7 @@ static void finish_task_switch(struct rq * Manfred Spraul */ prev_state = prev->state; - finish_arch_switch(prev); + _finish_arch_switch(prev); perf_counter_task_sched_in(current, cpu_of(rq)); finish_lock_switch(rq, prev); #ifdef CONFIG_SMP @@ -2813,12 +2890,16 @@ static void finish_task_switch(struct rq asmlinkage void schedule_tail(struct task_struct *prev) __releases(rq->lock) { - struct rq *rq = this_rq(); - - finish_task_switch(rq, prev); + preempt_disable(); + BUG_ON((preempt_count() & 0xffff) != 2); + finish_task_switch(this_rq(), prev); + __preempt_enable_no_resched(); + local_irq_enable(); #ifdef __ARCH_WANT_UNLOCKED_CTXSW /* In this case, finish_task_switch does not reenable preemption */ preempt_enable(); +#else + preempt_check_resched(); #endif if (current->set_child_tid) put_user(task_pid_vnr(current), current->set_child_tid); @@ -2866,6 +2947,11 @@ context_switch(struct rq *rq, struct tas spin_release(&rq->lock.dep_map, 1, _THIS_IP_); #endif +#ifdef CURRENT_PTR + barrier(); + *current_ptr = next; + *current_ti_ptr = next->thread_info; +#endif /* Here we just switch the register state and the stack. 
*/ switch_to(prev, next, prev); @@ -2912,6 +2998,11 @@ unsigned long nr_uninterruptible(void) return sum; } +unsigned long nr_uninterruptible_cpu(int cpu) +{ + return cpu_rq(cpu)->nr_uninterruptible; +} + unsigned long long nr_context_switches(void) { int i; @@ -4642,6 +4733,8 @@ void scheduler_tick(void) sched_clock_tick(); + BUG_ON(!irqs_disabled()); + spin_lock(&rq->lock); update_rq_clock(rq); update_cpu_load(rq); @@ -4721,8 +4814,8 @@ static noinline void __schedule_bug(stru { struct pt_regs *regs = get_irq_regs(); - printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", - prev->comm, prev->pid, preempt_count()); + printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d, CPU#%d\n", + prev->comm, preempt_count(), prev->pid, smp_processor_id()); debug_show_held_locks(prev); print_modules(); @@ -4740,12 +4833,14 @@ static noinline void __schedule_bug(stru */ static inline void schedule_debug(struct task_struct *prev) { +// WARN_ON(system_state == SYSTEM_BOOTING); + /* * Test if we are atomic. Since do_exit() needs to call into * schedule() atomically, we ignore that path for now. * Otherwise, whine if we are scheduling when we should not be. */ - if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) + if (unlikely(in_atomic() && !prev->exit_state)) __schedule_bug(prev); profile_hit(SCHED_PROFILING, __builtin_return_address(0)); @@ -4830,10 +4925,11 @@ asmlinkage void __sched __schedule(void) switch_count = &prev->nivcsw; release_kernel_lock(prev); -need_resched_nonpreemptible: schedule_debug(prev); + preempt_disable(); + if (sched_feat(HRTICK)) hrtick_clear(rq); @@ -4841,14 +4937,20 @@ need_resched_nonpreemptible: update_rq_clock(rq); clear_tsk_need_resched(prev); - if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { + if ((prev->state & ~TASK_RUNNING_MUTEX) && + !(preempt_count() & PREEMPT_ACTIVE)) { if (unlikely(signal_pending_state(prev->state, prev))) prev->state = TASK_RUNNING; - else + else { + touch_softlockup_watchdog(); deactivate_task(rq, prev, 1); + } switch_count = &prev->nvcsw; } + if (preempt_count() & PREEMPT_ACTIVE) + sub_preempt_count(PREEMPT_ACTIVE); + #ifdef CONFIG_SMP if (prev->sched_class->pre_schedule) prev->sched_class->pre_schedule(rq, prev); @@ -4875,19 +4977,26 @@ need_resched_nonpreemptible: */ cpu = smp_processor_id(); rq = cpu_rq(cpu); - } else - spin_unlock_irq(&rq->lock); + __preempt_enable_no_resched(); + } else { + __preempt_enable_no_resched(); + spin_unlock(&rq->lock); + } - if (unlikely(reacquire_kernel_lock(current) < 0)) - goto need_resched_nonpreemptible; + reacquire_kernel_lock(current); + BUG_ON(preempt_count() & 0xffff); } asmlinkage void __sched schedule(void) { + BUG_ON((preempt_count() & 0xffff) && !current->exit_state); need_resched: - preempt_disable(); + local_irq_disable(); __schedule(); - __preempt_enable_no_resched(); + local_irq_enable(); + + BUG_ON(preempt_count() & 0xffff); + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; } @@ -4955,6 +5064,35 @@ out: #endif #ifdef CONFIG_PREEMPT + +/* + * Global flag to turn preemption off on a CONFIG_PREEMPT kernel: + */ +int kernel_preemption = 1; + +static int __init preempt_setup (char *str) +{ + if (!strncmp(str, "off", 3)) { + if (kernel_preemption) { + printk(KERN_INFO "turning off kernel preemption!\n"); + kernel_preemption = 0; + } + return 1; + } + if (!strncmp(str, "on", 2)) { + if (!kernel_preemption) { + printk(KERN_INFO "turning on kernel preemption!\n"); + kernel_preemption = 1; + } + return 1; + } + get_option(&str, &kernel_preemption); + 
+ return 1; +} + +__setup("preempt=", preempt_setup); + /* * this is the entry point to schedule() from in-kernel preemption * off of preempt_enable. Kernel preemptions off return from interrupt @@ -4966,6 +5104,8 @@ asmlinkage void __sched preempt_schedule struct task_struct *task = current; int saved_lock_depth; + if (!kernel_preemption) + return; /* * If there is a non-zero preempt_count or interrupts are disabled, * we do not want to preempt the current task. Just return.. @@ -4974,6 +5114,7 @@ asmlinkage void __sched preempt_schedule return; do { + local_irq_disable(); add_preempt_count(PREEMPT_ACTIVE); /* @@ -4983,9 +5124,9 @@ asmlinkage void __sched preempt_schedule */ saved_lock_depth = task->lock_depth; task->lock_depth = -1; - schedule(); + __schedule(); task->lock_depth = saved_lock_depth; - sub_preempt_count(PREEMPT_ACTIVE); + local_irq_enable(); /* * Check again in case we missed a preemption opportunity @@ -4997,10 +5138,10 @@ asmlinkage void __sched preempt_schedule EXPORT_SYMBOL(preempt_schedule); /* - * this is the entry point to schedule() from kernel preemption - * off of irq context. - * Note, that this is called and return with irqs disabled. This will - * protect us against recursive calling from irq. + * this is is the entry point for the IRQ return path. Called with + * interrupts disabled. To avoid infinite irq-entry recursion problems + * with fast-paced IRQ sources we do all of this carefully to never + * enable interrupts again. */ asmlinkage void __sched preempt_schedule_irq(void) { @@ -5008,10 +5149,17 @@ asmlinkage void __sched preempt_schedule struct task_struct *task = current; int saved_lock_depth; - /* Catch callers which need to be fixed */ - WARN_ON_ONCE(ti->preempt_count || !irqs_disabled()); + if (!kernel_preemption) + return; + /* + * If there is a non-zero preempt_count then just return. 
+ * (interrupts are disabled) + */ + if (unlikely(ti->preempt_count)) + return; do { + local_irq_disable(); add_preempt_count(PREEMPT_ACTIVE); /* @@ -5021,11 +5169,9 @@ asmlinkage void __sched preempt_schedule */ saved_lock_depth = task->lock_depth; task->lock_depth = -1; - local_irq_enable(); - schedule(); + __schedule(); local_irq_disable(); task->lock_depth = saved_lock_depth; - sub_preempt_count(PREEMPT_ACTIVE); /* * Check again in case we missed a preemption opportunity @@ -5410,6 +5556,8 @@ void task_setprio(struct task_struct *p, p->prio = prio; +// trace_special_pid(p->pid, __PRIO(oldprio), PRIO(p)); + if (running) p->sched_class->set_curr_task(rq); if (on_rq) { @@ -5417,6 +5565,8 @@ void task_setprio(struct task_struct *p, check_class_changed(rq, p, prev_class, oldprio, running); } +// trace_special(prev_resched, _need_resched(), 0); + task_rq_unlock(rq, &flags); } @@ -6056,14 +6206,51 @@ SYSCALL_DEFINE0(sched_yield) */ spin_unlock_no_resched(&rq->lock); - schedule(); + __schedule(); + + local_irq_enable(); + preempt_check_resched(); return 0; } +#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) +void __might_sleep(char *file, int line) +{ +#ifdef in_atomic + static unsigned long prev_jiffy; /* ratelimiting */ + + if ((!in_atomic() && !irqs_disabled()) || + system_state != SYSTEM_RUNNING || oops_in_progress) + return; + + if (debug_direct_keyboard && hardirq_count()) + return; + + if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) + return; + prev_jiffy = jiffies; + + printk(KERN_ERR + "BUG: sleeping function called from invalid context at %s:%d\n", + file, line); + printk(KERN_ERR + "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", + in_atomic(), irqs_disabled(), + current->pid, current->comm); + + debug_show_held_locks(current); + if (irqs_disabled()) + print_irqtrace_events(current); + dump_stack(); +#endif +} +EXPORT_SYMBOL(__might_sleep); +#endif + static void __cond_resched(void) { -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP +#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) __might_sleep(__FILE__, __LINE__); #endif /* @@ -6072,10 +6259,11 @@ static void __cond_resched(void) * cond_resched() call. 
*/ do { + local_irq_disable(); add_preempt_count(PREEMPT_ACTIVE); - schedule(); - sub_preempt_count(PREEMPT_ACTIVE); + __schedule(); } while (need_resched()); + local_irq_enable(); } int __sched _cond_resched(void) @@ -6115,6 +6303,25 @@ int __cond_resched_raw_spinlock(raw_spin } EXPORT_SYMBOL(__cond_resched_raw_spinlock); +#ifdef CONFIG_PREEMPT_RT + +int __cond_resched_spinlock(spinlock_t *lock) +{ +#if (defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)) || defined(CONFIG_PREEMPT_RT) + if (lock->break_lock) { + lock->break_lock = 0; + spin_unlock_no_resched(lock); + __cond_resched(); + spin_lock(lock); + return 1; + } +#endif + return 0; +} +EXPORT_SYMBOL(__cond_resched_spinlock); + +#endif + /* * Voluntarily preempt a process context that has softirqs disabled: */ @@ -6161,11 +6368,15 @@ int cond_resched_hardirq_context(void) WARN_ON_ONCE(!irqs_disabled()); if (hardirq_need_resched()) { +#ifndef CONFIG_PREEMPT_RT irq_exit(); +#endif local_irq_enable(); __cond_resched(); +#ifndef CONFIG_PREEMPT_RT local_irq_disable(); __irq_enter(); +#endif return 1; } @@ -6173,17 +6384,58 @@ int cond_resched_hardirq_context(void) } EXPORT_SYMBOL(cond_resched_hardirq_context); +#ifdef CONFIG_PREEMPT_VOLUNTARY + +int voluntary_preemption = 1; + +EXPORT_SYMBOL(voluntary_preemption); + +static int __init voluntary_preempt_setup (char *str) +{ + if (!strncmp(str, "off", 3)) + voluntary_preemption = 0; + else + get_option(&str, &voluntary_preemption); + if (!voluntary_preemption) + printk("turning off voluntary preemption!\n"); + + return 1; +} + +__setup("voluntary-preempt=", voluntary_preempt_setup); + +#endif + /** * yield - yield the current processor to other threads. * * This is a shortcut for kernel-space yielding - it marks the * thread runnable and calls sys_sched_yield(). */ -void __sched yield(void) +void __sched __yield(void) { set_current_state(TASK_RUNNING); sys_sched_yield(); } + +void __sched yield(void) +{ + static int once = 1; + + /* + * it's a bug to rely on yield() with RT priorities. We print + * the first occurance after bootup ... 
this will still give + * us an idea about the scope of the problem, without spamming + * the syslog: + */ + if (once && rt_task(current)) { + once = 0; + printk(KERN_ERR "BUG: %s:%d RT task yield()-ing!\n", + current->comm, current->pid); + dump_stack(); + } + __yield(); +} EXPORT_SYMBOL(yield); /* @@ -6360,6 +6612,7 @@ void sched_show_task(struct task_struct void show_state_filter(unsigned long state_filter) { struct task_struct *g, *p; + int do_unlock = 1; #if BITS_PER_LONG == 32 printk(KERN_INFO @@ -6368,7 +6621,16 @@ void show_state_filter(unsigned long sta printk(KERN_INFO " task PC stack pid father\n"); #endif +#ifdef CONFIG_PREEMPT_RT + if (!read_trylock(&tasklist_lock)) { + printk("hm, tasklist_lock write-locked.\n"); + printk("ignoring ...\n"); + do_unlock = 0; + } +#else read_lock(&tasklist_lock); +#endif + do_each_thread(g, p) { /* * reset the NMI-timeout, listing all files on a slow @@ -6384,7 +6646,8 @@ void show_state_filter(unsigned long sta #ifdef CONFIG_SCHED_DEBUG sysrq_sched_debug_show(); #endif - read_unlock(&tasklist_lock); + if (do_unlock) + read_unlock(&tasklist_lock); /* * Only show locks if all tasks are dumped: */ @@ -6556,11 +6819,18 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) { struct rq *rq_dest, *rq_src; + unsigned long flags; int ret = 0, on_rq; if (unlikely(!cpu_active(dest_cpu))) return ret; + /* + * PREEMPT_RT: this relies on write_lock_irq(&tasklist_lock) + * disabling interrupts - which on PREEMPT_RT does not do: + */ + local_irq_save(flags); + rq_src = cpu_rq(src_cpu); rq_dest = cpu_rq(dest_cpu); @@ -6585,6 +6855,8 @@ done: ret = 1; fail: double_rq_unlock(rq_src, rq_dest); + local_irq_restore(flags); + return ret; } @@ -8882,6 +9154,9 @@ void __init sched_init(void) atomic_inc(&init_mm.mm_count); enter_lazy_tlb(&init_mm, current); +#ifdef CONFIG_PREEMPT_RT + printk("Real-Time Preemption Support (C) 2004-2007 Ingo Molnar\n"); +#endif /* * Make us the idle thread. 
Technically, schedule() should not be * called from this thread, however somewhere below it might be, @@ -8906,36 +9181,6 @@ void __init sched_init(void) scheduler_running = 1; } -#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) -void __might_sleep(char *file, int line) -{ -#ifdef in_atomic - static unsigned long prev_jiffy; /* ratelimiting */ - - if ((!in_atomic() && !irqs_disabled()) || - system_state != SYSTEM_RUNNING || oops_in_progress) - return; - if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) - return; - prev_jiffy = jiffies; - - printk(KERN_ERR - "BUG: sleeping function called from invalid context at %s:%d\n", - file, line); - printk(KERN_ERR - "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", - in_atomic(), irqs_disabled(), - current->pid, current->comm); - - debug_show_held_locks(current); - if (irqs_disabled()) - print_irqtrace_events(current); - dump_stack(); -#endif -} -EXPORT_SYMBOL(__might_sleep); -#endif - #ifdef CONFIG_MAGIC_SYSRQ static void normalize_task(struct rq *rq, struct task_struct *p) { Index: linux-2.6-tip/kernel/sched_rt.c =================================================================== --- linux-2.6-tip.orig/kernel/sched_rt.c +++ linux-2.6-tip/kernel/sched_rt.c @@ -844,6 +844,48 @@ static void dequeue_rt_entity(struct sch } } +static inline void incr_rt_nr_uninterruptible(struct task_struct *p, + struct rq *rq) +{ + rq->rt.rt_nr_uninterruptible++; +} + +static inline void decr_rt_nr_uninterruptible(struct task_struct *p, + struct rq *rq) +{ + rq->rt.rt_nr_uninterruptible--; +} + +unsigned long rt_nr_running(void) +{ + unsigned long i, sum = 0; + + for_each_online_cpu(i) + sum += cpu_rq(i)->rt.rt_nr_running; + + return sum; +} + +unsigned long rt_nr_running_cpu(int cpu) +{ + return cpu_rq(cpu)->rt.rt_nr_running; +} + +unsigned long rt_nr_uninterruptible(void) +{ + unsigned long i, sum = 0; + + for_each_online_cpu(i) + sum += cpu_rq(i)->rt.rt_nr_uninterruptible; + + return sum; +} + +unsigned long rt_nr_uninterruptible_cpu(int cpu) +{ + return cpu_rq(cpu)->rt.rt_nr_uninterruptible; +} + /* * Adding/removing a task to/from a priority array: */ @@ -856,6 +898,9 @@ static void enqueue_task_rt(struct rq *r enqueue_rt_entity(rt_se); + if (p->state == TASK_UNINTERRUPTIBLE) + decr_rt_nr_uninterruptible(p, rq); + if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); @@ -867,6 +912,10 @@ static void dequeue_task_rt(struct rq *r struct sched_rt_entity *rt_se = &p->rt; update_curr_rt(rq); + + if (p->state == TASK_UNINTERRUPTIBLE) + incr_rt_nr_uninterruptible(p, rq); + dequeue_rt_entity(rt_se); dequeue_pushable_task(rq, p); @@ -1453,8 +1502,10 @@ static int pull_rt_task(struct rq *this_ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) { /* Try to pull RT tasks here if we lower this rq's prio */ - if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio) + if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio) { pull_rt_task(rq); + schedstat_inc(rq, rto_schedule); + } } /* @@ -1536,7 +1587,6 @@ static void set_cpus_allowed_rt(struct t */ if (weight > 1) enqueue_pushable_task(rq, p); - } if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) { Index: linux-2.6-tip/lib/kernel_lock.c =================================================================== --- linux-2.6-tip.orig/lib/kernel_lock.c +++ linux-2.6-tip/lib/kernel_lock.c @@ -41,16 +41,17 @@ int __lockfunc __reacquire_kernel_lock(v struct task_struct *task = current; int saved_lock_depth = 
task->lock_depth; + local_irq_enable(); BUG_ON(saved_lock_depth < 0); task->lock_depth = -1; - __preempt_enable_no_resched(); down(&kernel_sem); - preempt_disable(); task->lock_depth = saved_lock_depth; + local_irq_disable(); + return 0; } patches/preempt-realtime-sched-remove-debug.patch0000664000076400007640000000210311160544604021172 0ustar tglxtglxSubject: preempt: realtime sched remove debug From: Ingo Molnar Date: Wed Feb 04 06:17:45 CET 2009 Signed-off-by: Ingo Molnar --- kernel/sched.c | 5 ----- 1 file changed, 5 deletions(-) Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -2891,7 +2891,6 @@ asmlinkage void schedule_tail(struct tas __releases(rq->lock) { preempt_disable(); - BUG_ON((preempt_count() & 0xffff) != 2); finish_task_switch(this_rq(), prev); __preempt_enable_no_resched(); local_irq_enable(); @@ -4984,19 +4983,15 @@ asmlinkage void __sched __schedule(void) } reacquire_kernel_lock(current); - BUG_ON(preempt_count() & 0xffff); } asmlinkage void __sched schedule(void) { - BUG_ON((preempt_count() & 0xffff) && !current->exit_state); need_resched: local_irq_disable(); __schedule(); local_irq_enable(); - BUG_ON(preempt_count() & 0xffff); - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; } patches/preempt-realtime-mmdrop-delayed.patch0000664000076400007640000001646311157735266020462 0ustar tglxtglxSubject: preempt: realtime mmdrop delayed From: Ingo Molnar Date: Wed Feb 04 00:02:55 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/mm_types.h | 3 + include/linux/sched.h | 8 ++ kernel/fork.c | 141 +++++++++++++++++++++++++++++++++++++++++++++++ kernel/sched.c | 6 +- 4 files changed, 157 insertions(+), 1 deletion(-) Index: linux-2.6-tip/include/linux/mm_types.h =================================================================== --- linux-2.6-tip.orig/include/linux/mm_types.h +++ linux-2.6-tip/include/linux/mm_types.h @@ -241,6 +241,9 @@ struct mm_struct { /* Architecture-specific MM context */ mm_context_t context; + /* realtime bits */ + struct list_head delayed_drop; + /* Swap token stuff */ /* * Last value of global fault stamp as seen by this process. Index: linux-2.6-tip/include/linux/sched.h =================================================================== --- linux-2.6-tip.orig/include/linux/sched.h +++ linux-2.6-tip/include/linux/sched.h @@ -2063,12 +2063,20 @@ extern struct mm_struct * mm_alloc(void) /* mmdrop drops the mm and the page tables */ extern void __mmdrop(struct mm_struct *); +extern void __mmdrop_delayed(struct mm_struct *); + static inline void mmdrop(struct mm_struct * mm) { if (unlikely(atomic_dec_and_test(&mm->mm_count))) __mmdrop(mm); } +static inline void mmdrop_delayed(struct mm_struct * mm) +{ + if (atomic_dec_and_test(&mm->mm_count)) + __mmdrop_delayed(mm); +} + /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); /* Grab a reference to a task's mm, if it is not already going away */ Index: linux-2.6-tip/kernel/fork.c =================================================================== --- linux-2.6-tip.orig/kernel/fork.c +++ linux-2.6-tip/kernel/fork.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -49,6 +50,8 @@ #include #include #include +#include +#include #include #include #include @@ -88,6 +91,15 @@ __cacheline_aligned DEFINE_RWLOCK(taskli DEFINE_TRACE(sched_process_fork); +/* + * Delayed mmdrop. 
In the PREEMPT_RT case we + * dont want to do this from the scheduling + * context. + */ +static DEFINE_PER_CPU(struct task_struct *, desched_task); + +static DEFINE_PER_CPU(struct list_head, delayed_drop_list); + int nr_processes(void) { int cpu; @@ -174,6 +186,8 @@ void __put_task_struct(struct task_struc void __init fork_init(unsigned long mempages) { + int i; + #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR #ifndef ARCH_MIN_TASKALIGN #define ARCH_MIN_TASKALIGN L1_CACHE_BYTES @@ -204,6 +218,9 @@ void __init fork_init(unsigned long memp init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; init_task.signal->rlim[RLIMIT_SIGPENDING] = init_task.signal->rlim[RLIMIT_NPROC]; + + for (i = 0; i < NR_CPUS; i++) + INIT_LIST_HEAD(&per_cpu(delayed_drop_list, i)); } int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst, @@ -285,6 +302,7 @@ static int dup_mmap(struct mm_struct *mm mm->locked_vm = 0; mm->mmap = NULL; mm->mmap_cache = NULL; + INIT_LIST_HEAD(&mm->delayed_drop); mm->free_area_cache = oldmm->mmap_base; mm->cached_hole_size = ~0UL; mm->map_count = 0; @@ -1277,7 +1295,9 @@ static struct task_struct *copy_process( attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); attach_pid(p, PIDTYPE_SID, task_session(current)); list_add_tail_rcu(&p->tasks, &init_task.tasks); + preempt_disable(); __get_cpu_var(process_counts)++; + preempt_enable(); } attach_pid(p, PIDTYPE_PID, pid); nr_threads++; @@ -1743,3 +1763,124 @@ int unshare_files(struct files_struct ** task_unlock(task); return 0; } + +static int mmdrop_complete(void) +{ + struct list_head *head; + int ret = 0; + + head = &get_cpu_var(delayed_drop_list); + while (!list_empty(head)) { + struct mm_struct *mm = list_entry(head->next, + struct mm_struct, delayed_drop); + list_del(&mm->delayed_drop); + put_cpu_var(delayed_drop_list); + + __mmdrop(mm); + ret = 1; + + head = &get_cpu_var(delayed_drop_list); + } + put_cpu_var(delayed_drop_list); + + return ret; +} + +/* + * We dont want to do complex work from the scheduler, thus + * we delay the work to a per-CPU worker thread: + */ +void __mmdrop_delayed(struct mm_struct *mm) +{ + struct task_struct *desched_task; + struct list_head *head; + + head = &get_cpu_var(delayed_drop_list); + list_add_tail(&mm->delayed_drop, head); + desched_task = __get_cpu_var(desched_task); + if (desched_task) + wake_up_process(desched_task); + put_cpu_var(delayed_drop_list); +} + +static int desched_thread(void * __bind_cpu) +{ + set_user_nice(current, -10); + current->flags |= PF_NOFREEZE | PF_SOFTIRQ; + + set_current_state(TASK_INTERRUPTIBLE); + + while (!kthread_should_stop()) { + + if (mmdrop_complete()) + continue; + schedule(); + + /* + * This must be called from time to time on ia64, and is a + * no-op on other archs. Used to be in cpu_idle(), but with + * the new -rt semantics it can't stay there. 
+ */ + check_pgt_cache(); + + set_current_state(TASK_INTERRUPTIBLE); + } + __set_current_state(TASK_RUNNING); + return 0; +} + +static int __devinit cpu_callback(struct notifier_block *nfb, + unsigned long action, + void *hcpu) +{ + int hotcpu = (unsigned long)hcpu; + struct task_struct *p; + + switch (action) { + case CPU_UP_PREPARE: + + BUG_ON(per_cpu(desched_task, hotcpu)); + INIT_LIST_HEAD(&per_cpu(delayed_drop_list, hotcpu)); + p = kthread_create(desched_thread, hcpu, "desched/%d", hotcpu); + if (IS_ERR(p)) { + printk("desched_thread for %i failed\n", hotcpu); + return NOTIFY_BAD; + } + per_cpu(desched_task, hotcpu) = p; + kthread_bind(p, hotcpu); + break; + case CPU_ONLINE: + + wake_up_process(per_cpu(desched_task, hotcpu)); + break; +#ifdef CONFIG_HOTPLUG_CPU + case CPU_UP_CANCELED: + + /* Unbind so it can run. Fall thru. */ + kthread_bind(per_cpu(desched_task, hotcpu), smp_processor_id()); + case CPU_DEAD: + + p = per_cpu(desched_task, hotcpu); + per_cpu(desched_task, hotcpu) = NULL; + kthread_stop(p); + takeover_tasklets(hotcpu); + break; +#endif /* CONFIG_HOTPLUG_CPU */ + } + return NOTIFY_OK; +} + +static struct notifier_block __devinitdata cpu_nfb = { + .notifier_call = cpu_callback +}; + +__init int spawn_desched_task(void) +{ + void *cpu = (void *)(long)smp_processor_id(); + + cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); + cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); + register_cpu_notifier(&cpu_nfb); + return 0; +} + Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -2871,8 +2871,12 @@ static void finish_task_switch(struct rq #endif fire_sched_in_preempt_notifiers(current); + /* + * Delay the final freeing of the mm or task, so that we dont have + * to do complex work from within the scheduler: + */ if (mm) - mmdrop(mm); + mmdrop_delayed(mm); if (unlikely(prev_state == TASK_DEAD)) { /* * Remove function-return probe instances associated with this patches/preempt-realtime-sched-i386.patch0000664000076400007640000000400111160527544017305 0ustar tglxtglxSubject: preempt: realtime sched i386 From: Ingo Molnar Date: Wed Feb 04 00:02:55 CET 2009 Signed-off-by: Ingo Molnar --- arch/x86/kernel/entry_32.S | 11 +++++++---- arch/x86/kernel/process_32.c | 4 +++- 2 files changed, 10 insertions(+), 5 deletions(-) Index: linux-2.6-tip/arch/x86/kernel/entry_32.S =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/entry_32.S +++ linux-2.6-tip/arch/x86/kernel/entry_32.S @@ -371,14 +371,18 @@ END(ret_from_exception) #ifdef CONFIG_PREEMPT ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) + cmpl $0, kernel_preemption + jz restore_nocheck cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? jnz restore_nocheck need_resched: movl TI_flags(%ebp), %ecx # need_resched set ? testb $_TIF_NEED_RESCHED, %cl - jz restore_all + jz restore_nocheck testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? 
- jz restore_all + jz restore_nocheck + DISABLE_INTERRUPTS(CLBR_ANY) + call preempt_schedule_irq jmp need_resched END(resume_kernel) @@ -616,12 +620,11 @@ work_pending: testl $(_TIF_NEED_RESCHED), %ecx jz work_notifysig work_resched: - call schedule + call __schedule LOCKDEP_SYS_EXIT DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt # setting need_resched or sigpending # between sampling and the iret - TRACE_IRQS_OFF movl TI_flags(%ebp), %ecx andl $_TIF_WORK_MASK, %ecx # is there any work to be done other # than syscall tracing? Index: linux-2.6-tip/arch/x86/kernel/process_32.c =================================================================== --- linux-2.6-tip.orig/arch/x86/kernel/process_32.c +++ linux-2.6-tip/arch/x86/kernel/process_32.c @@ -119,10 +119,12 @@ void cpu_idle(void) pm_idle(); start_critical_timings(); } + local_irq_disable(); tick_nohz_restart_sched_tick(); __preempt_enable_no_resched(); - schedule(); + __schedule(); preempt_disable(); + local_irq_enable(); } } patches/preempt-realtime-prevent-idle-boosting.patch0000664000076400007640000000347211160544604021757 0ustar tglxtglxSubject: Preempt-RT: Prevent boosting of idle task Idle task boosting is a nono in general. There is one exception, when NOHZ is active: The idle task calls get_next_timer_interrupt() and holds the timer wheel base->lock on the CPU and another CPU wants to access the timer (probably to cancel it). We can safely ignore the boosting request, as the idle CPU runs this code with interrupts disabled and will complete the lock protected section without being interrupted. So there is no real need to boost. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/sched.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -5538,6 +5538,25 @@ void task_setprio(struct task_struct *p, BUG_ON(prio < 0 || prio > MAX_PRIO); rq = task_rq_lock(p, &flags); + + /* + * Idle task boosting is a nono in general. There is one + * exception, when NOHZ is active: + * + * The idle task calls get_next_timer_interrupt() and holds + * the timer wheel base->lock on the CPU and another CPU wants + * to access the timer (probably to cancel it). We can safely + * ignore the boosting request, as the idle CPU runs this code + * with interrupts disabled and will complete the lock + * protected section without being interrupted. So there is no + * real need to boost.
+ */ + if (unlikely(p == rq->idle)) { + WARN_ON(p != rq->curr); + WARN_ON(p->pi_blocked_on); + goto out_unlock; + } + update_rq_clock(rq); oldprio = p->prio; @@ -5566,6 +5585,7 @@ void task_setprio(struct task_struct *p, } // trace_special(prev_resched, _need_resched(), 0); +out_unlock: task_rq_unlock(rq, &flags); } patches/preempt-realtime-sched-cpupri.patch0000664000076400007640000000113611150327144020115 0ustar tglxtglxSubject: preempt: realtime sched cpupri From: Ingo Molnar Date: Wed Feb 04 00:02:54 CET 2009 Signed-off-by: Ingo Molnar --- kernel/sched_cpupri.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/kernel/sched_cpupri.h =================================================================== --- linux-2.6-tip.orig/kernel/sched_cpupri.h +++ linux-2.6-tip/kernel/sched_cpupri.h @@ -12,7 +12,7 @@ /* values 2-101 are RT priorities 0-99 */ struct cpupri_vec { - spinlock_t lock; + raw_spinlock_t lock; int count; cpumask_var_t mask; }; patches/preempt-realtime-core.patch0000664000076400007640000007423111156703376016500 0ustar tglxtglxSubject: preempt: realtime core From: Ingo Molnar Date: Wed Feb 04 00:02:53 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/hardirq.h | 6 +- include/linux/kernel.h | 9 +++- include/linux/profile.h | 12 +++-- include/linux/radix-tree.h | 13 ++++++ include/linux/smp.h | 11 +++++ include/linux/smp_lock.h | 2 include/linux/workqueue.h | 3 + kernel/Kconfig.preempt | 91 ++++++++++++++++++++++++++++++--------------- kernel/exit.c | 20 ++++++--- kernel/fork.c | 12 +++++ kernel/futex.c | 10 +++- kernel/notifier.c | 4 - kernel/signal.c | 4 + kernel/softirq.c | 14 +++++- kernel/sys.c | 1 kernel/user.c | 4 - kernel/workqueue.c | 54 +++++++++++++++++++++++++- lib/Kconfig.debug | 4 + lib/Makefile | 3 - lib/kernel_lock.c | 14 +++++- lib/locking-selftest.c | 29 +++++++++----- lib/radix-tree.c | 6 ++ 22 files changed, 257 insertions(+), 69 deletions(-) Index: linux-2.6-tip/include/linux/hardirq.h =================================================================== --- linux-2.6-tip.orig/include/linux/hardirq.h +++ linux-2.6-tip/include/linux/hardirq.h @@ -75,9 +75,9 @@ * Are we doing bottom half or hardware interrupt processing? * Are we in a softirq context? Interrupt context? */ -#define in_irq() (hardirq_count()) -#define in_softirq() (softirq_count()) -#define in_interrupt() (irq_count()) +#define in_irq() (hardirq_count() || (current->flags & PF_HARDIRQ)) +#define in_softirq() (softirq_count() || (current->flags & PF_SOFTIRQ)) +#define in_interrupt() (irq_count()) /* * Are we in NMI context? 
Index: linux-2.6-tip/include/linux/kernel.h =================================================================== --- linux-2.6-tip.orig/include/linux/kernel.h +++ linux-2.6-tip/include/linux/kernel.h @@ -122,7 +122,7 @@ extern int _cond_resched(void); # define might_resched() do { } while (0) #endif -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP +#if defined(CONFIG_DEBUG_SPINLOCK_SLEEP) || defined(CONFIG_DEBUG_PREEMPT) void __might_sleep(char *file, int line); /** * might_sleep - annotation for functions that can sleep @@ -278,6 +278,12 @@ extern void printk_tick(void); extern void asmlinkage __attribute__((format(printf, 1, 2))) early_printk(const char *fmt, ...); +#ifdef CONFIG_PREEMPT_RT +extern void zap_rt_locks(void); +#else +# define zap_rt_locks() do { } while (0) +#endif + unsigned long int_sqrt(unsigned long); static inline void console_silent(void) @@ -306,6 +312,7 @@ extern int root_mountflags; /* Values used for system_state */ extern enum system_states { SYSTEM_BOOTING, + SYSTEM_BOOTING_SCHEDULER_OK, SYSTEM_RUNNING, SYSTEM_HALT, SYSTEM_POWER_OFF, Index: linux-2.6-tip/include/linux/profile.h =================================================================== --- linux-2.6-tip.orig/include/linux/profile.h +++ linux-2.6-tip/include/linux/profile.h @@ -4,14 +4,16 @@ #include #include #include +#include #include #include -#define CPU_PROFILING 1 -#define SCHED_PROFILING 2 -#define SLEEP_PROFILING 3 -#define KVM_PROFILING 4 +#define CPU_PROFILING 1 +#define SCHED_PROFILING 2 +#define SLEEP_PROFILING 3 +#define KVM_PROFILING 4 +#define PREEMPT_PROFILING 5 struct proc_dir_entry; struct pt_regs; @@ -36,6 +38,8 @@ enum profile_type { PROFILE_MUNMAP }; +extern int prof_pid; + #ifdef CONFIG_PROFILING extern int prof_on __read_mostly; Index: linux-2.6-tip/include/linux/radix-tree.h =================================================================== --- linux-2.6-tip.orig/include/linux/radix-tree.h +++ linux-2.6-tip/include/linux/radix-tree.h @@ -167,7 +167,18 @@ radix_tree_gang_lookup_slot(struct radix unsigned long first_index, unsigned int max_items); unsigned long radix_tree_next_hole(struct radix_tree_root *root, unsigned long index, unsigned long max_scan); +/* + * On a mutex based kernel we can freely schedule within the radix code: + */ +#ifdef CONFIG_PREEMPT_RT +static inline int radix_tree_preload(gfp_t gfp_mask) +{ + return 0; +} +#else int radix_tree_preload(gfp_t gfp_mask); +#endif + void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag); @@ -187,7 +198,9 @@ int radix_tree_tagged(struct radix_tree_ static inline void radix_tree_preload_end(void) { +#ifndef CONFIG_PREEMPT_RT preempt_enable(); +#endif } #endif /* _LINUX_RADIX_TREE_H */ Index: linux-2.6-tip/include/linux/smp.h =================================================================== --- linux-2.6-tip.orig/include/linux/smp.h +++ linux-2.6-tip/include/linux/smp.h @@ -50,6 +50,16 @@ extern void smp_send_stop(void); */ extern void smp_send_reschedule(int cpu); +/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + +/* + * trigger a reschedule on all other CPUs: + */ +extern void smp_send_reschedule_allbutself(void); + /* * Prepare machine for booting other CPUs. 
@@ -142,6 +152,7 @@ static inline int up_smp_call_function(v 0; \ }) static inline void smp_send_reschedule(int cpu) { } +static inline void smp_send_reschedule_allbutself(void) { } #define num_booting_cpus() 1 #define smp_prepare_boot_cpu() do {} while (0) #define smp_call_function_mask(mask, func, info, wait) \ Index: linux-2.6-tip/include/linux/smp_lock.h =================================================================== --- linux-2.6-tip.orig/include/linux/smp_lock.h +++ linux-2.6-tip/include/linux/smp_lock.h @@ -45,7 +45,7 @@ static inline void cycle_kernel_lock(voi #define unlock_kernel() do { } while(0) #define release_kernel_lock(task) do { } while(0) #define cycle_kernel_lock() do { } while(0) -#define reacquire_kernel_lock(task) 0 +#define reacquire_kernel_lock(task) do { } while(0) #define kernel_locked() 1 #endif /* CONFIG_LOCK_KERNEL */ Index: linux-2.6-tip/include/linux/workqueue.h =================================================================== --- linux-2.6-tip.orig/include/linux/workqueue.h +++ linux-2.6-tip/include/linux/workqueue.h @@ -190,6 +190,9 @@ __create_workqueue_key(const char *name, #define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0) #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0) +extern void set_workqueue_prio(struct workqueue_struct *wq, int policy, + int rt_priority, int nice); + extern void destroy_workqueue(struct workqueue_struct *wq); extern int queue_work(struct workqueue_struct *wq, struct work_struct *work); Index: linux-2.6-tip/kernel/Kconfig.preempt =================================================================== --- linux-2.6-tip.orig/kernel/Kconfig.preempt +++ linux-2.6-tip/kernel/Kconfig.preempt @@ -1,14 +1,13 @@ - choice - prompt "Preemption Model" - default PREEMPT_NONE + prompt "Preemption Mode" + default PREEMPT_RT config PREEMPT_NONE bool "No Forced Preemption (Server)" help - This is the traditional Linux preemption model, geared towards + This is the traditional Linux preemption model geared towards throughput. It will still provide good latencies most of the - time, but there are no guarantees and occasional longer delays + time but there are no guarantees and occasional long delays are possible. Select this option if you are building a kernel for a server or @@ -21,7 +20,7 @@ config PREEMPT_VOLUNTARY help This option reduces the latency of the kernel by adding more "explicit preemption points" to the kernel code. These new - preemption points have been selected to reduce the maximum + preemption points have been selected to minimize the maximum latency of rescheduling, providing faster application reactions, at the cost of slightly lower throughput. @@ -33,38 +32,73 @@ config PREEMPT_VOLUNTARY Select this if you are building a kernel for a desktop system. -config PREEMPT +config PREEMPT_DESKTOP bool "Preemptible Kernel (Low-Latency Desktop)" help This option reduces the latency of the kernel by making - all kernel code (that is not executing in a critical section) + all kernel code that is not executing in a critical section preemptible. This allows reaction to interactive events by permitting a low priority process to be preempted involuntarily even if it is in kernel mode executing a system call and would - otherwise not be about to reach a natural preemption point. - This allows applications to run more 'smoothly' even when the - system is under load, at the cost of slightly lower throughput - and a slight runtime overhead to kernel code. 
+ otherwise not about to reach a preemption point. This allows + applications to run more 'smoothly' even when the system is + under load, at the cost of slighly lower throughput and a + slight runtime overhead to kernel code. + + (According to profiles, when this mode is selected then even + during kernel-intense workloads the system is in an immediately + preemptible state more than 50% of the time.) Select this if you are building a kernel for a desktop or embedded system with latency requirements in the milliseconds range. +config PREEMPT_RT + bool "Complete Preemption (Real-Time)" + select PREEMPT_SOFTIRQS + select PREEMPT_HARDIRQS + select PREEMPT_RCU + select RT_MUTEXES + help + This option further reduces the scheduling latency of the + kernel by replacing almost every spinlock used by the kernel + with preemptible mutexes and thus making all but the most + critical kernel code involuntarily preemptible. The remaining + handful of lowlevel non-preemptible codepaths are short and + have a deterministic latency of a couple of tens of + microseconds (depending on the hardware). This also allows + applications to run more 'smoothly' even when the system is + under load, at the cost of lower throughput and runtime + overhead to kernel code. + + (According to profiles, when this mode is selected then even + during kernel-intense workloads the system is in an immediately + preemptible state more than 95% of the time.) + + Select this if you are building a kernel for a desktop, + embedded or real-time system with guaranteed latency + requirements of 100 usecs or lower. + endchoice +config PREEMPT + bool + default y + depends on PREEMPT_DESKTOP || PREEMPT_RT + config PREEMPT_SOFTIRQS bool "Thread Softirqs" default n # depends on PREEMPT help This option reduces the latency of the kernel by 'threading' - soft interrupts. This means that all softirqs will execute - in softirqd's context. While this helps latency, it can also - reduce performance. - - The threading of softirqs can also be controlled via - /proc/sys/kernel/softirq_preemption runtime flag and the - sofirq-preempt=0/1 boot-time option. + soft interrupts. This means that all softirqs will execute + in softirqd's context. While this helps latency, it can also + reduce performance. + + The threading of softirqs can also be controlled via + /proc/sys/kernel/softirq_preemption runtime flag and the + sofirq-preempt=0/1 boot-time option. Say N if you are unsure. @@ -75,15 +109,14 @@ config PREEMPT_HARDIRQS select PREEMPT_SOFTIRQS help This option reduces the latency of the kernel by 'threading' - hardirqs. This means that all (or selected) hardirqs will run - in their own kernel thread context. While this helps latency, - this feature can also reduce performance. - - The threading of hardirqs can also be controlled via the - /proc/sys/kernel/hardirq_preemption runtime flag and the - hardirq-preempt=0/1 boot-time option. Per-irq threading can - be enabled/disable via the /proc/irq///threaded - runtime flags. + hardirqs. This means that all (or selected) hardirqs will run + in their own kernel thread context. While this helps latency, + this feature can also reduce performance. + + The threading of hardirqs can also be controlled via the + /proc/sys/kernel/hardirq_preemption runtime flag and the + hardirq-preempt=0/1 boot-time option. Per-irq threading can + be enabled/disable via the /proc/irq///threaded + runtime flags. Say N if you are unsure. 
- Index: linux-2.6-tip/kernel/exit.c =================================================================== --- linux-2.6-tip.orig/kernel/exit.c +++ linux-2.6-tip/kernel/exit.c @@ -75,7 +75,9 @@ static void __unhash_process(struct task detach_pid(p, PIDTYPE_SID); list_del_rcu(&p->tasks); + preempt_disable(); __get_cpu_var(process_counts)--; + preempt_enable(); } list_del_rcu(&p->thread_group); list_del_init(&p->sibling); @@ -726,9 +728,11 @@ static void exit_mm(struct task_struct * task_lock(tsk); tsk->mm = NULL; up_read(&mm->mmap_sem); + preempt_disable(); // FIXME enter_lazy_tlb(mm, current); /* We don't want this task to be frozen prematurely */ clear_freeze_flag(tsk); + preempt_enable(); task_unlock(tsk); mm_update_next_owner(mm); mmput(mm); @@ -1118,14 +1122,17 @@ NORET_TYPE void do_exit(long code) if (tsk->splice_pipe) __free_pipe_info(tsk->splice_pipe); - preempt_disable(); +again: + local_irq_disable(); /* causes final put_task_struct in finish_task_switch(). */ tsk->state = TASK_DEAD; - schedule(); - BUG(); - /* Avoid "noreturn function does return". */ - for (;;) - cpu_relax(); /* For when BUG is null */ + __schedule(); + printk(KERN_ERR "BUG: dead task %s:%d back from the grave!\n", + current->comm, current->pid); + printk(KERN_ERR ".... flags: %08x, count: %d, state: %08lx\n", + current->flags, atomic_read(¤t->usage), current->state); + printk(KERN_ERR ".... trying again ...\n"); + goto again; } EXPORT_SYMBOL_GPL(do_exit); @@ -1574,6 +1581,7 @@ static int wait_consider_task(struct tas int __user *stat_addr, struct rusage __user *ru) { int ret = eligible_child(type, pid, options, p); + BUG_ON(!atomic_read(&p->usage)); if (!ret) return ret; Index: linux-2.6-tip/kernel/fork.c =================================================================== --- linux-2.6-tip.orig/kernel/fork.c +++ linux-2.6-tip/kernel/fork.c @@ -176,6 +176,16 @@ void __put_task_struct(struct task_struc free_task(tsk); } +#ifdef CONFIG_PREEMPT_RT +void __put_task_struct_cb(struct rcu_head *rhp) +{ + struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); + + __put_task_struct(tsk); + +} +#endif + /* * macro override instead of weak attribute alias, to workaround * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions. @@ -1241,11 +1251,13 @@ static struct task_struct *copy_process( * to ensure it is on a valid CPU (and if not, just force it back to * parent's CPU). This avoids alot of nasty races. 
*/ + preempt_disable(); p->cpus_allowed = current->cpus_allowed; p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed; if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) || !cpu_online(task_cpu(p)))) set_task_cpu(p, smp_processor_id()); + preempt_enable(); /* CLONE_PARENT re-uses the old parent */ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { Index: linux-2.6-tip/kernel/futex.c =================================================================== --- linux-2.6-tip.orig/kernel/futex.c +++ linux-2.6-tip/kernel/futex.c @@ -867,7 +867,7 @@ retry_private: plist_del(&this->list, &hb1->chain); plist_add(&this->list, &hb2->chain); this->lock_ptr = &hb2->lock; -#ifdef CONFIG_DEBUG_PI_LIST +#if defined(CONFIG_DEBUG_PI_LIST) && !defined(CONFIG_PREEMPT_RT) this->list.plist.lock = &hb2->lock; #endif } @@ -925,7 +925,7 @@ static inline void queue_me(struct futex prio = min(current->normal_prio, MAX_RT_PRIO); plist_node_init(&q->list, prio); -#ifdef CONFIG_DEBUG_PI_LIST +#if defined(CONFIG_DEBUG_PI_LIST) && !defined(CONFIG_PREEMPT_RT) q->list.plist.lock = &hb->lock; #endif plist_add(&q->list, &hb->chain); @@ -1203,6 +1203,10 @@ retry_private: * q.lock_ptr != 0 is not safe, because of ordering against wakeup. */ if (likely(!plist_node_empty(&q.list))) { + unsigned long nosched_flag = current->flags & PF_NOSCHED; + + current->flags &= ~PF_NOSCHED; + if (!abs_time) schedule(); else { @@ -1233,6 +1237,8 @@ retry_private: destroy_hrtimer_on_stack(&t.timer); } + + current->flags |= nosched_flag; } __set_current_state(TASK_RUNNING); Index: linux-2.6-tip/kernel/notifier.c =================================================================== --- linux-2.6-tip.orig/kernel/notifier.c +++ linux-2.6-tip/kernel/notifier.c @@ -71,7 +71,7 @@ static int notifier_chain_unregister(str * @returns: notifier_call_chain returns the value returned by the * last notifier function called. */ -static int __kprobes notifier_call_chain(struct notifier_block **nl, +static int __kprobes notrace notifier_call_chain(struct notifier_block **nl, unsigned long val, void *v, int nr_to_call, int *nr_calls) { @@ -217,7 +217,7 @@ int blocking_notifier_chain_register(str * not yet working and interrupts must remain disabled. At * such times we must not call down_write(). 
*/ - if (unlikely(system_state == SYSTEM_BOOTING)) + if (unlikely(system_state < SYSTEM_RUNNING)) return notifier_chain_register(&nh->head, n); down_write(&nh->rwsem); Index: linux-2.6-tip/kernel/signal.c =================================================================== --- linux-2.6-tip.orig/kernel/signal.c +++ linux-2.6-tip/kernel/signal.c @@ -821,7 +821,9 @@ static int send_signal(int sig, struct s trace_sched_signal_send(sig, t); +#ifdef CONFIG_SMP assert_spin_locked(&t->sighand->siglock); +#endif if (!prepare_signal(sig, t)) return 0; @@ -1576,6 +1578,7 @@ static void ptrace_stop(int exit_code, i if (may_ptrace_stop()) { do_notify_parent_cldstop(current, CLD_TRAPPED); read_unlock(&tasklist_lock); + current->flags &= ~PF_NOSCHED; schedule(); } else { /* @@ -1644,6 +1647,7 @@ finish_stop(int stop_count) } do { + current->flags &= ~PF_NOSCHED; schedule(); } while (try_to_freeze()); /* Index: linux-2.6-tip/kernel/softirq.c =================================================================== --- linux-2.6-tip.orig/kernel/softirq.c +++ linux-2.6-tip/kernel/softirq.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -133,6 +134,8 @@ static void trigger_softirqs(void) } } +#ifndef CONFIG_PREEMPT_RT + /* * This one is for softirq.c-internal use, * where hardirqs are disabled legitimately: @@ -234,6 +237,8 @@ void local_bh_enable_ip(unsigned long ip } EXPORT_SYMBOL(local_bh_enable_ip); +#endif + /* * We restart softirq processing MAX_SOFTIRQ_RESTART times, * and we fall back to softirqd after that. @@ -633,7 +638,7 @@ void tasklet_kill(struct tasklet_struct while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { do - yield(); + msleep(1); while (test_bit(TASKLET_STATE_SCHED, &t->state)); } tasklet_unlock_wait(t); @@ -1041,6 +1046,11 @@ int softirq_preemption = 1; EXPORT_SYMBOL(softirq_preemption); +/* + * Real-Time Preemption depends on softirq threading: + */ +#ifndef CONFIG_PREEMPT_RT + static int __init softirq_preempt_setup (char *str) { if (!strncmp(str, "off", 3)) @@ -1054,7 +1064,7 @@ static int __init softirq_preempt_setup } __setup("softirq-preempt=", softirq_preempt_setup); - +#endif #endif #ifdef CONFIG_SMP Index: linux-2.6-tip/kernel/sys.c =================================================================== --- linux-2.6-tip.orig/kernel/sys.c +++ linux-2.6-tip/kernel/sys.c @@ -38,6 +38,7 @@ #include #include +#include #include #include Index: linux-2.6-tip/kernel/user.c =================================================================== --- linux-2.6-tip.orig/kernel/user.c +++ linux-2.6-tip/kernel/user.c @@ -405,11 +405,11 @@ void free_uid(struct user_struct *up) if (!up) return; - local_irq_save(flags); + local_irq_save_nort(flags); if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) free_user(up, flags); else - local_irq_restore(flags); + local_irq_restore_nort(flags); } struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) Index: linux-2.6-tip/kernel/workqueue.c =================================================================== --- linux-2.6-tip.orig/kernel/workqueue.c +++ linux-2.6-tip/kernel/workqueue.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -35,6 +36,8 @@ #include #include +#include + /* * The per-CPU workqueue (if single thread, we always use the first * possible cpu). @@ -162,13 +165,14 @@ static void __queue_work(struct cpu_work * * We queue the work to the CPU on which it was submitted, but if the CPU dies * it can be processed by another CPU. 
+ * + * Especially no such guarantee on PREEMPT_RT. */ int queue_work(struct workqueue_struct *wq, struct work_struct *work) { - int ret; + int ret = 0, cpu = raw_smp_processor_id(); - ret = queue_work_on(get_cpu(), wq, work); - put_cpu(); + ret = queue_work_on(cpu, wq, work); return ret; } @@ -909,6 +913,49 @@ static void cleanup_workqueue_thread(str cwq->thread = NULL; } +void set_workqueue_thread_prio(struct workqueue_struct *wq, int cpu, + int policy, int rt_priority, int nice) +{ + struct sched_param param = { .sched_priority = rt_priority }; + struct cpu_workqueue_struct *cwq; + mm_segment_t oldfs = get_fs(); + struct task_struct *p; + unsigned long flags; + int ret; + + cwq = per_cpu_ptr(wq->cpu_wq, cpu); + spin_lock_irqsave(&cwq->lock, flags); + p = cwq->thread; + spin_unlock_irqrestore(&cwq->lock, flags); + + set_user_nice(p, nice); + + set_fs(KERNEL_DS); + ret = sys_sched_setscheduler(p->pid, policy, ¶m); + set_fs(oldfs); + + WARN_ON(ret); +} + +void set_workqueue_prio(struct workqueue_struct *wq, int policy, + int rt_priority, int nice) +{ + int cpu; + + /* We don't need the distraction of CPUs appearing and vanishing. */ + get_online_cpus(); + spin_lock(&workqueue_lock); + if (is_wq_single_threaded(wq)) + set_workqueue_thread_prio(wq, 0, policy, rt_priority, nice); + else { + for_each_online_cpu(cpu) + set_workqueue_thread_prio(wq, cpu, policy, + rt_priority, nice); + } + spin_unlock(&workqueue_lock); + put_online_cpus(); +} + /** * destroy_workqueue - safely terminate a workqueue * @wq: target workqueue @@ -1035,6 +1082,7 @@ void __init init_workqueues(void) hotcpu_notifier(workqueue_cpu_callback, 0); keventd_wq = create_workqueue("events"); BUG_ON(!keventd_wq); + set_workqueue_prio(keventd_wq, SCHED_FIFO, 1, -20); #ifdef CONFIG_SMP work_on_cpu_wq = create_workqueue("work_on_cpu"); BUG_ON(!work_on_cpu_wq); Index: linux-2.6-tip/lib/Kconfig.debug =================================================================== --- linux-2.6-tip.orig/lib/Kconfig.debug +++ linux-2.6-tip/lib/Kconfig.debug @@ -364,6 +364,8 @@ config DEBUG_RT_MUTEXES help This allows rt mutex semantics violations and rt mutex related deadlocks (lockups) to be detected and reported automatically. + When realtime preemption is enabled this includes spinlocks, + rwlocks, mutexes and (rw)semaphores config DEBUG_PI_LIST bool @@ -387,7 +389,7 @@ config DEBUG_SPINLOCK config DEBUG_MUTEXES bool "Mutex debugging: basic checks" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !PREEMPT_RT help This feature allows mutex semantics violations to be detected and reported. 
Index: linux-2.6-tip/lib/Makefile =================================================================== --- linux-2.6-tip.orig/lib/Makefile +++ linux-2.6-tip/lib/Makefile @@ -34,7 +34,8 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o -lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o +obj-$(CONFIG_PREEMPT_RT) += plist.o +obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o Index: linux-2.6-tip/lib/kernel_lock.c =================================================================== --- linux-2.6-tip.orig/lib/kernel_lock.c +++ linux-2.6-tip/lib/kernel_lock.c @@ -35,6 +35,8 @@ DECLARE_MUTEX(kernel_sem); * about recursion, both due to the down() and due to the enabling of * preemption. schedule() will re-check the preemption flag after * reacquiring the semaphore. + * + * Called with interrupts disabled. */ int __lockfunc __reacquire_kernel_lock(void) { @@ -68,11 +70,15 @@ void __lockfunc lock_kernel(void) struct task_struct *task = current; int depth = task->lock_depth + 1; - if (likely(!depth)) + if (likely(!depth)) { /* * No recursion worries - we set up lock_depth _after_ */ down(&kernel_sem); +#ifdef CONFIG_DEBUG_RT_MUTEXES + current->last_kernel_lock = __builtin_return_address(0); +#endif + } task->lock_depth = depth; } @@ -83,8 +89,12 @@ void __lockfunc unlock_kernel(void) BUG_ON(task->lock_depth < 0); - if (likely(--task->lock_depth < 0)) + if (likely(--task->lock_depth == -1)) { +#ifdef CONFIG_DEBUG_RT_MUTEXES + current->last_kernel_lock = NULL; +#endif up(&kernel_sem); + } } EXPORT_SYMBOL(lock_kernel); Index: linux-2.6-tip/lib/locking-selftest.c =================================================================== --- linux-2.6-tip.orig/lib/locking-selftest.c +++ linux-2.6-tip/lib/locking-selftest.c @@ -158,7 +158,7 @@ static void init_shared_classes(void) local_bh_disable(); \ local_irq_disable(); \ lockdep_softirq_enter(); \ - WARN_ON(!in_softirq()); + /* FIXME: preemptible softirqs. 
WARN_ON(!in_softirq()); */ #define SOFTIRQ_EXIT() \ lockdep_softirq_exit(); \ @@ -550,6 +550,11 @@ GENERATE_TESTCASE(init_held_rsem) #undef E /* + * FIXME: turns these into raw-spinlock tests on -rt + */ +#ifndef CONFIG_PREEMPT_RT + +/* * locking an irq-safe lock with irqs enabled: */ #define E1() \ @@ -890,6 +895,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_ #include "locking-selftest-softirq.h" // GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft) +#endif /* !CONFIG_PREEMPT_RT */ + #ifdef CONFIG_DEBUG_LOCK_ALLOC # define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map) # define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map) @@ -1004,7 +1011,7 @@ static inline void print_testname(const #define DO_TESTCASE_1(desc, name, nr) \ print_testname(desc"/"#nr); \ - dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ + dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ printk("\n"); #define DO_TESTCASE_1B(desc, name, nr) \ @@ -1012,17 +1019,17 @@ static inline void print_testname(const dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \ printk("\n"); -#define DO_TESTCASE_3(desc, name, nr) \ - print_testname(desc"/"#nr); \ - dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \ - dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ +#define DO_TESTCASE_3(desc, name, nr) \ + print_testname(desc"/"#nr); \ + dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \ + dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ printk("\n"); -#define DO_TESTCASE_3RW(desc, name, nr) \ - print_testname(desc"/"#nr); \ +#define DO_TESTCASE_3RW(desc, name, nr) \ + print_testname(desc"/"#nr); \ dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\ - dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ + dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \ printk("\n"); @@ -1053,7 +1060,7 @@ static inline void print_testname(const print_testname(desc); \ dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \ dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \ - dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \ + dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \ dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ @@ -1185,6 +1192,7 @@ void locking_selftest(void) /* * irq-context testcases: */ +#ifndef CONFIG_PREEMPT_RT DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1); DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A); DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B); @@ -1194,6 +1202,7 @@ void locking_selftest(void) DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); +#endif if (unexpected_testcase_failures) { printk("-----------------------------------------------------------------\n"); Index: linux-2.6-tip/lib/radix-tree.c =================================================================== --- linux-2.6-tip.orig/lib/radix-tree.c +++ linux-2.6-tip/lib/radix-tree.c @@ -157,12 +157,14 @@ radix_tree_node_alloc(struct radix_tree_ * succeed in getting a node here (and never reach * kmem_cache_alloc) */ + rtp = &get_cpu_var(radix_tree_preloads); rtp = &__get_cpu_var(radix_tree_preloads); if (rtp->nr) { ret = rtp->nodes[rtp->nr - 1]; rtp->nodes[rtp->nr - 1] = NULL; rtp->nr--; } + put_cpu_var(radix_tree_preloads); } if (ret == NULL) ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); @@ -195,6 +197,8 @@ radix_tree_node_free(struct 
radix_tree_n call_rcu(&node->rcu_head, radix_tree_node_rcu_free); } +#ifndef CONFIG_PREEMPT_RT + /* * Load up this CPU's radix_tree_node buffer with sufficient objects to * ensure that the addition of a single element in the tree cannot fail. On @@ -227,6 +231,8 @@ out: } EXPORT_SYMBOL(radix_tree_preload); +#endif + /* * Return the maximum key which can be store into a * radix tree with height HEIGHT. patches/fix-net-bug-fixes.patch0000664000076400007640000000430311160544603015517 0ustar tglxtglxSubject: patches/fix-net-bug-fixes.patch MUST-FIX: check the skbuff.c bit! MUST-FIX: check the sched.c bit! This doesn't look good. You declare it as a PER_CPU_LOCKED, but then never use the extra lock to synchronize data. Given that sock_proc_inuse_get() is a racy read anyway, the 'right' fix would be to do something like: Signed-off-by: Ingo Molnar --- kernel/sched.c | 2 ++ net/core/skbuff.c | 2 +- net/core/sock.c | 7 +++++-- 3 files changed, 8 insertions(+), 3 deletions(-) Index: linux-2.6-tip/kernel/sched.c =================================================================== --- linux-2.6-tip.orig/kernel/sched.c +++ linux-2.6-tip/kernel/sched.c @@ -6348,6 +6348,8 @@ int __sched cond_resched_softirq(void) { #ifndef CONFIG_PREEMPT_RT WARN_ON_ONCE(!in_softirq()); + if (!in_softirq()) + return 0; #endif if (need_resched() && system_state == SYSTEM_RUNNING) { local_bh_enable(); Index: linux-2.6-tip/net/core/skbuff.c =================================================================== --- linux-2.6-tip.orig/net/core/skbuff.c +++ linux-2.6-tip/net/core/skbuff.c @@ -383,7 +383,7 @@ static void skb_release_head_state(struc secpath_put(skb->sp); #endif if (skb->destructor) { - WARN_ON(in_irq()); +// WARN_ON(in_irq()); skb->destructor(skb); } #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) Index: linux-2.6-tip/net/core/sock.c =================================================================== --- linux-2.6-tip.orig/net/core/sock.c +++ linux-2.6-tip/net/core/sock.c @@ -1949,8 +1949,9 @@ static DECLARE_BITMAP(proto_inuse_idx, P #ifdef CONFIG_NET_NS void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) { - int cpu = smp_processor_id(); + int cpu = get_cpu(); per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val; + put_cpu(); } EXPORT_SYMBOL_GPL(sock_prot_inuse_add); @@ -1996,7 +1997,9 @@ static DEFINE_PER_CPU(struct prot_inuse, void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) { - __get_cpu_var(prot_inuse).val[prot->inuse_idx] += val; + int cpu = get_cpu(); + per_cpu(prot_inuse, cpu).val[prot->inuse_idx] += val; + put_cpu(); } EXPORT_SYMBOL_GPL(sock_prot_inuse_add); patches/resurrect-softirq-code.patch0000664000076400007640000002644611156711144016706 0ustar tglxtglxSubject: resurrect: softirq code From: Ingo Molnar Date: Wed Feb 04 06:41:19 CET 2009 Note: net/ipv4/proc.c is a fix for upstream too. 
Signed-off-by: Ingo Molnar --- include/linux/bottom_half.h | 2 kernel/softirq.c | 206 +++++++++++++++++++++++++++++++------------- net/ipv4/proc.c | 4 3 files changed, 152 insertions(+), 60 deletions(-) Index: linux-2.6-tip/include/linux/bottom_half.h =================================================================== --- linux-2.6-tip.orig/include/linux/bottom_half.h +++ linux-2.6-tip/include/linux/bottom_half.h @@ -1,7 +1,7 @@ #ifndef _LINUX_BH_H #define _LINUX_BH_H -#ifdef CONFIG_PREEMPT_RT +#ifdef CONFIG_PREEMPT_HARDIRQS # define local_bh_disable() do { } while (0) # define __local_bh_disable(ip) do { } while (0) # define _local_bh_enable() do { } while (0) Index: linux-2.6-tip/kernel/softirq.c =================================================================== --- linux-2.6-tip.orig/kernel/softirq.c +++ linux-2.6-tip/kernel/softirq.c @@ -134,7 +134,7 @@ static void trigger_softirqs(void) } } -#ifndef CONFIG_PREEMPT_RT +#ifndef CONFIG_PREEMPT_HARDIRQS /* * This one is for softirq.c-internal use, @@ -188,7 +188,6 @@ EXPORT_SYMBOL(local_bh_disable); */ void _local_bh_enable(void) { - WARN_ON_ONCE(in_irq()); WARN_ON_ONCE(!irqs_disabled()); if (softirq_count() == SOFTIRQ_OFFSET) @@ -198,17 +197,22 @@ void _local_bh_enable(void) EXPORT_SYMBOL(_local_bh_enable); -static inline void _local_bh_enable_ip(unsigned long ip) +void local_bh_enable(void) { - WARN_ON_ONCE(in_irq() || irqs_disabled()); #ifdef CONFIG_TRACE_IRQFLAGS - local_irq_disable(); + unsigned long flags; + + WARN_ON_ONCE(in_irq()); +#endif + +#ifdef CONFIG_TRACE_IRQFLAGS + local_irq_save(flags); #endif /* * Are softirqs going to be turned on now: */ if (softirq_count() == SOFTIRQ_OFFSET) - trace_softirqs_on(ip); + trace_softirqs_on((unsigned long)__builtin_return_address(0)); /* * Keep preemption disabled until we are done with * softirq processing: @@ -220,20 +224,40 @@ static inline void _local_bh_enable_ip(u dec_preempt_count(); #ifdef CONFIG_TRACE_IRQFLAGS - local_irq_enable(); + local_irq_restore(flags); #endif preempt_check_resched(); } - -void local_bh_enable(void) -{ - _local_bh_enable_ip((unsigned long)__builtin_return_address(0)); -} EXPORT_SYMBOL(local_bh_enable); void local_bh_enable_ip(unsigned long ip) { - _local_bh_enable_ip(ip); +#ifdef CONFIG_TRACE_IRQFLAGS + unsigned long flags; + + WARN_ON_ONCE(in_irq()); + + local_irq_save(flags); +#endif + /* + * Are softirqs going to be turned on now: + */ + if (softirq_count() == SOFTIRQ_OFFSET) + trace_softirqs_on(ip); + /* + * Keep preemption disabled until we are done with + * softirq processing: + */ + sub_preempt_count(SOFTIRQ_OFFSET - 1); + + if (unlikely(!in_interrupt() && local_softirq_pending())) + do_softirq(); + + dec_preempt_count(); +#ifdef CONFIG_TRACE_IRQFLAGS + local_irq_restore(flags); +#endif + preempt_check_resched(); } EXPORT_SYMBOL(local_bh_enable_ip); @@ -248,58 +272,116 @@ EXPORT_SYMBOL(local_bh_enable_ip); * we want to handle softirqs as soon as possible, but they * should not be able to lock up the box. 
*/ -#define MAX_SOFTIRQ_RESTART 10 +#define MAX_SOFTIRQ_RESTART 20 DEFINE_TRACE(softirq_entry); DEFINE_TRACE(softirq_exit); -asmlinkage void ___do_softirq(void) +static DEFINE_PER_CPU(u32, softirq_running); + +/* + * Debug check for leaking preempt counts in h->action handlers: + */ + +static inline void debug_check_preempt_count_start(__u32 *preempt_count) { - struct softirq_action *h; - __u32 pending; +#ifdef CONFIG_DEBUG_PREEMPT + *preempt_count = preempt_count(); +#endif +} + +static inline void + debug_check_preempt_count_stop(__u32 *preempt_count, struct softirq_action *h) +{ +#ifdef CONFIG_DEBUG_PREEMPT + if (*preempt_count == preempt_count()) + return; + + print_symbol("BUG: %Ps exited with wrong preemption count!\n", + (unsigned long)h->action); + printk("=> enter: %08x, exit: %08x.\n", *preempt_count, preempt_count()); + preempt_count() = *preempt_count; +#endif +} + +/* + * Execute softirq handlers: + */ +static void ___do_softirq(const int same_prio_only) +{ + __u32 pending, available_mask, same_prio_skipped, preempt_count; int max_restart = MAX_SOFTIRQ_RESTART; - int cpu; + struct softirq_action *h; + int cpu, softirq; pending = local_softirq_pending(); account_system_vtime(current); cpu = smp_processor_id(); restart: + available_mask = -1; + softirq = 0; + same_prio_skipped = 0; /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); - local_irq_enable(); - h = softirq_vec; do { - if (pending & 1) { - int prev_count = preempt_count(); + u32 softirq_mask = 1 << softirq; - trace_softirq_entry(h, softirq_vec); - h->action(h); - trace_softirq_exit(h, softirq_vec); - if (unlikely(prev_count != preempt_count())) { - printk(KERN_ERR "huh, entered softirq %td %s %p" - "with preempt_count %08x," - " exited with %08x?\n", h - softirq_vec, - softirq_to_name[h - softirq_vec], - h->action, prev_count, preempt_count()); - preempt_count() = prev_count; - } + if (!(pending & 1)) + goto next; - rcu_bh_qsctr_inc(cpu); - cond_resched_softirq_context(); + debug_check_preempt_count_start(&preempt_count); + +#if defined(CONFIG_PREEMPT_SOFTIRQS) && defined(CONFIG_PREEMPT_HARDIRQS) + /* + * If executed by a same-prio hardirq thread + * then skip pending softirqs that belong + * to softirq threads with different priority: + */ + if (same_prio_only) { + struct task_struct *tsk; + + tsk = __get_cpu_var(ksoftirqd)[softirq].tsk; + if (tsk && tsk->normal_prio != current->normal_prio) { + same_prio_skipped |= softirq_mask; + available_mask &= ~softirq_mask; + goto next; + } } +#endif + /* + * Is this softirq already being processed? 
+ */ + if (per_cpu(softirq_running, cpu) & softirq_mask) { + available_mask &= ~softirq_mask; + goto next; + } + per_cpu(softirq_running, cpu) |= softirq_mask; + local_irq_enable(); + + h->action(h); + + debug_check_preempt_count_stop(&preempt_count, h); + + rcu_bh_qsctr_inc(cpu); + cond_resched_softirq_context(); + local_irq_disable(); + per_cpu(softirq_running, cpu) &= ~softirq_mask; +next: h++; + softirq++; pending >>= 1; } while (pending); - local_irq_disable(); - + or_softirq_pending(same_prio_skipped); pending = local_softirq_pending(); - if (pending && --max_restart) - goto restart; + if (pending & available_mask) { + if (--max_restart) + goto restart; + } if (pending) trigger_softirqs(); @@ -323,7 +405,7 @@ asmlinkage void __do_softirq(void) __local_bh_disable((unsigned long)__builtin_return_address(0)); lockdep_softirq_enter(); - ___do_softirq(); + ___do_softirq(0); lockdep_softirq_exit(); @@ -480,7 +562,7 @@ void __tasklet_hi_schedule(struct taskle unsigned long flags; local_irq_save(flags); - __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), HI_SOFTIRQ); + __tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ); local_irq_restore(flags); } @@ -488,11 +570,7 @@ EXPORT_SYMBOL(__tasklet_hi_schedule); void __tasklet_hi_schedule_first(struct tasklet_struct *t) { - BUG_ON(!irqs_disabled()); - - t->next = __get_cpu_var(tasklet_hi_vec).head; - __get_cpu_var(tasklet_hi_vec).head = t; - __raise_softirq_irqoff(HI_SOFTIRQ); + __tasklet_hi_schedule(t); } EXPORT_SYMBOL(__tasklet_hi_schedule_first); @@ -612,7 +690,7 @@ static void tasklet_hi_action(struct sof local_irq_disable(); list = __get_cpu_var(tasklet_hi_vec).head; __get_cpu_var(tasklet_hi_vec).head = NULL; - __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_vec).head; + __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head; local_irq_enable(); __tasklet_action(a, list); @@ -812,8 +890,9 @@ static int ksoftirqd(void * __data) { struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2 }; struct softirqdata *data = __data; - u32 mask = (1 << data->nr); + u32 softirq_mask = (1 << data->nr); struct softirq_action *h; + int cpu = data->cpu; #ifdef CONFIG_PREEMPT_SOFTIRQS init_waitqueue_head(&data->wait); @@ -825,7 +904,8 @@ static int ksoftirqd(void * __data) while (!kthread_should_stop()) { preempt_disable(); - if (!(local_softirq_pending() & mask)) { + if (!(local_softirq_pending() & softirq_mask)) { +sleep_more: __preempt_enable_no_resched(); schedule(); preempt_disable(); @@ -837,16 +917,26 @@ static int ksoftirqd(void * __data) data->running = 1; #endif - while (local_softirq_pending() & mask) { + while (local_softirq_pending() & softirq_mask) { /* Preempt disable stops cpu going offline. If already offline, we'll be on wrong CPU: don't process */ - if (cpu_is_offline(data->cpu)) + if (cpu_is_offline(cpu)) goto wait_to_die; local_irq_disable(); + /* + * Is the softirq already being executed by + * a hardirq context? 
+ */ + if (per_cpu(softirq_running, cpu) & softirq_mask) { + local_irq_enable(); + set_current_state(TASK_INTERRUPTIBLE); + goto sleep_more; + } + per_cpu(softirq_running, cpu) |= softirq_mask; __preempt_enable_no_resched(); - set_softirq_pending(local_softirq_pending() & ~mask); + set_softirq_pending(local_softirq_pending() & ~softirq_mask); local_bh_disable(); local_irq_enable(); @@ -856,6 +946,7 @@ static int ksoftirqd(void * __data) rcu_bh_qsctr_inc(data->cpu); local_irq_disable(); + per_cpu(softirq_running, cpu) &= ~softirq_mask; _local_bh_enable(); local_irq_enable(); @@ -977,7 +1068,7 @@ static int __cpuinit cpu_callback(struct for (i = 0; i < MAX_SOFTIRQ; i++) { p = kthread_create(ksoftirqd, &per_cpu(ksoftirqd, hotcpu)[i], - "softirq-%s/%d", softirq_names[i], + "sirq-%s/%d", softirq_names[i], hotcpu); if (IS_ERR(p)) { printk("ksoftirqd %d for %i failed\n", i, @@ -1002,22 +1093,23 @@ static int __cpuinit cpu_callback(struct if (!per_cpu(ksoftirqd, hotcpu)[i].tsk) continue; kthread_bind(per_cpu(ksoftirqd, hotcpu)[i].tsk, - cpumask_any(cpu_online_mask)); + any_online_cpu(cpu_online_map)); } #endif case CPU_DEAD: case CPU_DEAD_FROZEN: { - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + struct sched_param param; for (i = 0; i < MAX_SOFTIRQ; i++) { + param.sched_priority = MAX_RT_PRIO-1; p = per_cpu(ksoftirqd, hotcpu)[i].tsk; + sched_setscheduler(p, SCHED_FIFO, ¶m); per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL; - sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); kthread_stop(p); } takeover_tasklets(hotcpu); break; - } + } #endif /* CONFIG_HOTPLUG_CPU */ } return NOTIFY_OK; Index: linux-2.6-tip/net/ipv4/proc.c =================================================================== --- linux-2.6-tip.orig/net/ipv4/proc.c +++ linux-2.6-tip/net/ipv4/proc.c @@ -54,8 +54,8 @@ static int sockstat_seq_show(struct seq_ int orphans, sockets; local_bh_disable(); - orphans = percpu_counter_sum_positive(&tcp_orphan_count), - sockets = percpu_counter_sum_positive(&tcp_sockets_allocated), + orphans = percpu_counter_sum_positive(&tcp_orphan_count); + sockets = percpu_counter_sum_positive(&tcp_sockets_allocated); local_bh_enable(); socket_seq_show(seq); patches/preempt-realtime-net.patch0000664000076400007640000004047511156214140016323 0ustar tglxtglxSubject: preempt: realtime net From: Ingo Molnar Date: Wed Feb 04 00:02:46 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/netdevice.h | 8 ++--- include/net/dn_dev.h | 6 +-- net/core/dev.c | 41 +++++++++++++++++++++++--- net/core/netpoll.c | 62 +++++++++++++++++++++++++--------------- net/decnet/dn_dev.c | 44 ++++++++++++++-------------- net/ipv4/icmp.c | 5 ++- net/ipv4/route.c | 4 +- net/ipv6/netfilter/ip6_tables.c | 2 - net/sched/sch_generic.c | 13 +++++--- 9 files changed, 121 insertions(+), 64 deletions(-) Index: linux-2.6-tip/include/linux/netdevice.h =================================================================== --- linux-2.6-tip.orig/include/linux/netdevice.h +++ linux-2.6-tip/include/linux/netdevice.h @@ -1634,14 +1634,14 @@ static inline void __netif_tx_lock(struc static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); - txq->xmit_lock_owner = smp_processor_id(); + txq->xmit_lock_owner = raw_smp_processor_id(); } static inline int __netif_tx_trylock(struct netdev_queue *txq) { int ok = spin_trylock(&txq->_xmit_lock); if (likely(ok)) - txq->xmit_lock_owner = smp_processor_id(); + txq->xmit_lock_owner = raw_smp_processor_id(); return ok; } @@ -1669,7 +1669,7 @@ static inline void 
netif_tx_lock(struct int cpu; spin_lock(&dev->tx_global_lock); - cpu = smp_processor_id(); + cpu = raw_smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); @@ -1733,7 +1733,7 @@ static inline void netif_tx_disable(stru int cpu; local_bh_disable(); - cpu = smp_processor_id(); + cpu = raw_smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); Index: linux-2.6-tip/include/net/dn_dev.h =================================================================== --- linux-2.6-tip.orig/include/net/dn_dev.h +++ linux-2.6-tip/include/net/dn_dev.h @@ -76,9 +76,9 @@ struct dn_dev_parms { int priority; /* Priority to be a router */ char *name; /* Name for sysctl */ int ctl_name; /* Index for sysctl */ - int (*up)(struct net_device *); - void (*down)(struct net_device *); - void (*timer3)(struct net_device *, struct dn_ifaddr *ifa); + int (*dn_up)(struct net_device *); + void (*dn_down)(struct net_device *); + void (*dn_timer3)(struct net_device *, struct dn_ifaddr *ifa); void *sysctl; }; Index: linux-2.6-tip/net/core/dev.c =================================================================== --- linux-2.6-tip.orig/net/core/dev.c +++ linux-2.6-tip/net/core/dev.c @@ -1879,9 +1879,16 @@ gso: Either shot noqueue qdisc, it is even simpler 8) */ if (dev->flags & IFF_UP) { - int cpu = smp_processor_id(); /* ok because BHs are off */ + int cpu = raw_smp_processor_id(); /* ok because BHs are off */ + /* + * No need to check for recursion with threaded interrupts: + */ +#ifdef CONFIG_PREEMPT_RT + if (1) { +#else if (txq->xmit_lock_owner != cpu) { +#endif HARD_TX_LOCK(dev, txq, cpu); @@ -1999,7 +2006,8 @@ EXPORT_SYMBOL(netif_rx_ni); static void net_tx_action(struct softirq_action *h) { - struct softnet_data *sd = &__get_cpu_var(softnet_data); + struct softnet_data *sd = &per_cpu(softnet_data, + raw_smp_processor_id()); if (sd->completion_queue) { struct sk_buff *clist; @@ -2015,6 +2023,11 @@ static void net_tx_action(struct softirq WARN_ON(atomic_read(&skb->users)); __kfree_skb(skb); + /* + * Safe to reschedule - the list is private + * at this point. + */ + cond_resched_softirq_context(); } } @@ -2033,6 +2046,22 @@ static void net_tx_action(struct softirq head = head->next_sched; root_lock = qdisc_lock(q); + /* + * We are executing in softirq context here, and + * if softirqs are preemptible, we must avoid + * infinite reactivation of the softirq by + * either the tx handler, or by netif_schedule(). + * (it would result in an infinitely looping + * softirq context) + * So we take the spinlock unconditionally. 
+ */ +#ifdef CONFIG_PREEMPT_SOFTIRQS + spin_lock(root_lock); + smp_mb__before_clear_bit(); + clear_bit(__QDISC_STATE_SCHED, &q->state); + qdisc_run(q); + spin_unlock(root_lock); +#else if (spin_trylock(root_lock)) { smp_mb__before_clear_bit(); clear_bit(__QDISC_STATE_SCHED, @@ -2049,6 +2078,7 @@ static void net_tx_action(struct softirq &q->state); } } +#endif } } } @@ -2257,7 +2287,7 @@ int netif_receive_skb(struct sk_buff *sk skb->dev = orig_dev->master; } - __get_cpu_var(netdev_rx_stat).total++; + per_cpu(netdev_rx_stat, raw_smp_processor_id()).total++; skb_reset_network_header(skb); skb_reset_transport_header(skb); @@ -2578,9 +2608,10 @@ EXPORT_SYMBOL(napi_gro_frags); static int process_backlog(struct napi_struct *napi, int quota) { int work = 0; - struct softnet_data *queue = &__get_cpu_var(softnet_data); + struct softnet_data *queue; unsigned long start_time = jiffies; + queue = &per_cpu(softnet_data, raw_smp_processor_id()); napi->weight = weight_p; do { struct sk_buff *skb; @@ -2614,7 +2645,7 @@ void __napi_schedule(struct napi_struct local_irq_save(flags); list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list); - __raise_softirq_irqoff(NET_RX_SOFTIRQ); + raise_softirq_irqoff(NET_RX_SOFTIRQ); local_irq_restore(flags); } EXPORT_SYMBOL(__napi_schedule); Index: linux-2.6-tip/net/core/netpoll.c =================================================================== --- linux-2.6-tip.orig/net/core/netpoll.c +++ linux-2.6-tip/net/core/netpoll.c @@ -68,20 +68,20 @@ static void queue_process(struct work_st txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - local_irq_save(flags); + local_irq_save_nort(flags); __netif_tx_lock(txq, smp_processor_id()); if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq) || ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) { skb_queue_head(&npinfo->txq, skb); __netif_tx_unlock(txq); - local_irq_restore(flags); + local_irq_restore_nort(flags); schedule_delayed_work(&npinfo->tx_work, HZ/10); return; } __netif_tx_unlock(txq); - local_irq_restore(flags); + local_irq_restore_nort(flags); } } @@ -151,7 +151,7 @@ static void poll_napi(struct net_device int budget = 16; list_for_each_entry(napi, &dev->napi_list, dev_list) { - if (napi->poll_owner != smp_processor_id() && + if (napi->poll_owner != raw_smp_processor_id() && spin_trylock(&napi->poll_lock)) { budget = poll_one_napi(dev->npinfo, napi, budget); spin_unlock(&napi->poll_lock); @@ -208,30 +208,35 @@ static void refill_skbs(void) static void zap_completion_queue(void) { - unsigned long flags; struct softnet_data *sd = &get_cpu_var(softnet_data); + struct sk_buff *clist = NULL; + unsigned long flags; if (sd->completion_queue) { - struct sk_buff *clist; local_irq_save(flags); clist = sd->completion_queue; sd->completion_queue = NULL; local_irq_restore(flags); - - while (clist != NULL) { - struct sk_buff *skb = clist; - clist = clist->next; - if (skb->destructor) { - atomic_inc(&skb->users); - dev_kfree_skb_any(skb); /* put this one back */ - } else { - __kfree_skb(skb); - } - } } + + /* + * Took the list private, can drop our softnet + * reference: + */ put_cpu_var(softnet_data); + + while (clist != NULL) { + struct sk_buff *skb = clist; + clist = clist->next; + if (skb->destructor) { + atomic_inc(&skb->users); + dev_kfree_skb_any(skb); /* put this one back */ + } else { + __kfree_skb(skb); + } + } } static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) @@ -239,13 +244,26 @@ static struct sk_buff *find_skb(struct n int count = 0; struct sk_buff *skb; +#ifdef 
CONFIG_PREEMPT_RT + /* + * On -rt skb_pool.lock is schedulable, so if we are + * in an atomic context we just try to dequeue from the + * pool and fail if we cannot get one. + */ + if (in_atomic() || irqs_disabled()) + goto pick_atomic; +#endif zap_completion_queue(); refill_skbs(); repeat: skb = alloc_skb(len, GFP_ATOMIC); - if (!skb) + if (!skb) { +#ifdef CONFIG_PREEMPT_RT +pick_atomic: +#endif skb = skb_dequeue(&skb_pool); + } if (!skb) { if (++count < 10) { @@ -265,7 +283,7 @@ static int netpoll_owner_active(struct n struct napi_struct *napi; list_for_each_entry(napi, &dev->napi_list, dev_list) { - if (napi->poll_owner == smp_processor_id()) + if (napi->poll_owner == raw_smp_processor_id()) return 1; } return 0; @@ -291,7 +309,7 @@ static void netpoll_send_skb(struct netp txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - local_irq_save(flags); + local_irq_save_nort(flags); /* try until next clock tick */ for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) { @@ -310,7 +328,7 @@ static void netpoll_send_skb(struct netp udelay(USEC_PER_POLL); } - local_irq_restore(flags); + local_irq_restore_nort(flags); } if (status != NETDEV_TX_OK) { @@ -731,7 +749,7 @@ int netpoll_setup(struct netpoll *np) np->name); break; } - cond_resched(); + schedule_timeout_uninterruptible(1); } /* If carrier appears to come up instantly, we don't Index: linux-2.6-tip/net/decnet/dn_dev.c =================================================================== --- linux-2.6-tip.orig/net/decnet/dn_dev.c +++ linux-2.6-tip/net/decnet/dn_dev.c @@ -90,9 +90,9 @@ static struct dn_dev_parms dn_dev_list[] .t3 = 10, .name = "ethernet", .ctl_name = NET_DECNET_CONF_ETHER, - .up = dn_eth_up, - .down = dn_eth_down, - .timer3 = dn_send_brd_hello, + .dn_up = dn_eth_up, + .dn_down = dn_eth_down, + .dn_timer3 = dn_send_brd_hello, }, { .type = ARPHRD_IPGRE, /* DECnet tunneled over GRE in IP */ @@ -102,7 +102,7 @@ static struct dn_dev_parms dn_dev_list[] .t3 = 10, .name = "ipgre", .ctl_name = NET_DECNET_CONF_GRE, - .timer3 = dn_send_brd_hello, + .dn_timer3 = dn_send_brd_hello, }, #if 0 { @@ -113,7 +113,7 @@ static struct dn_dev_parms dn_dev_list[] .t3 = 120, .name = "x25", .ctl_name = NET_DECNET_CONF_X25, - .timer3 = dn_send_ptp_hello, + .dn_timer3 = dn_send_ptp_hello, }, #endif #if 0 @@ -125,7 +125,7 @@ static struct dn_dev_parms dn_dev_list[] .t3 = 10, .name = "ppp", .ctl_name = NET_DECNET_CONF_PPP, - .timer3 = dn_send_brd_hello, + .dn_timer3 = dn_send_brd_hello, }, #endif { @@ -136,7 +136,7 @@ static struct dn_dev_parms dn_dev_list[] .t3 = 120, .name = "ddcmp", .ctl_name = NET_DECNET_CONF_DDCMP, - .timer3 = dn_send_ptp_hello, + .dn_timer3 = dn_send_ptp_hello, }, { .type = ARPHRD_LOOPBACK, /* Loopback interface - always last */ @@ -146,7 +146,7 @@ static struct dn_dev_parms dn_dev_list[] .t3 = 10, .name = "loopback", .ctl_name = NET_DECNET_CONF_LOOPBACK, - .timer3 = dn_send_brd_hello, + .dn_timer3 = dn_send_brd_hello, } }; @@ -305,11 +305,11 @@ static int dn_forwarding_proc(ctl_table */ tmp = dn_db->parms.forwarding; dn_db->parms.forwarding = old; - if (dn_db->parms.down) - dn_db->parms.down(dev); + if (dn_db->parms.dn_down) + dn_db->parms.dn_down(dev); dn_db->parms.forwarding = tmp; - if (dn_db->parms.up) - dn_db->parms.up(dev); + if (dn_db->parms.dn_up) + dn_db->parms.dn_up(dev); } return err; @@ -343,11 +343,11 @@ static int dn_forwarding_sysctl(ctl_tabl if (value > 2) return -EINVAL; - if (dn_db->parms.down) - dn_db->parms.down(dev); + if (dn_db->parms.dn_down) + dn_db->parms.dn_down(dev); 
dn_db->parms.forwarding = value; - if (dn_db->parms.up) - dn_db->parms.up(dev); + if (dn_db->parms.dn_up) + dn_db->parms.dn_up(dev); } return 0; @@ -1078,10 +1078,10 @@ static void dn_dev_timer_func(unsigned l struct dn_ifaddr *ifa; if (dn_db->t3 <= dn_db->parms.t2) { - if (dn_db->parms.timer3) { + if (dn_db->parms.dn_timer3) { for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) { if (!(ifa->ifa_flags & IFA_F_SECONDARY)) - dn_db->parms.timer3(dev, ifa); + dn_db->parms.dn_timer3(dev, ifa); } } dn_db->t3 = dn_db->parms.t3; @@ -1140,8 +1140,8 @@ static struct dn_dev *dn_dev_create(stru return NULL; } - if (dn_db->parms.up) { - if (dn_db->parms.up(dev) < 0) { + if (dn_db->parms.dn_up) { + if (dn_db->parms.dn_up(dev) < 0) { neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms); dev->dn_ptr = NULL; kfree(dn_db); @@ -1235,8 +1235,8 @@ static void dn_dev_delete(struct net_dev dn_dev_check_default(dev); neigh_ifdown(&dn_neigh_table, dev); - if (dn_db->parms.down) - dn_db->parms.down(dev); + if (dn_db->parms.dn_down) + dn_db->parms.dn_down(dev); dev->dn_ptr = NULL; Index: linux-2.6-tip/net/ipv4/icmp.c =================================================================== --- linux-2.6-tip.orig/net/ipv4/icmp.c +++ linux-2.6-tip/net/ipv4/icmp.c @@ -201,7 +201,10 @@ static const struct icmp_control icmp_po */ static struct sock *icmp_sk(struct net *net) { - return net->ipv4.icmp_sk[smp_processor_id()]; + /* + * Should be safe on PREEMPT_SOFTIRQS/HARDIRQS to use raw-smp-processor-id: + */ + return net->ipv4.icmp_sk[raw_smp_processor_id()]; } static inline struct sock *icmp_xmit_lock(struct net *net) Index: linux-2.6-tip/net/ipv4/route.c =================================================================== --- linux-2.6-tip.orig/net/ipv4/route.c +++ linux-2.6-tip/net/ipv4/route.c @@ -204,13 +204,13 @@ struct rt_hash_bucket { }; #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \ - defined(CONFIG_PROVE_LOCKING) + defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_PREEMPT_RT) /* * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks * The size of this table is a power of two and depends on the number of CPUS. * (on lockdep we have a quite big spinlock_t, so keep the size down there) */ -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT) # define RT_HASH_LOCK_SZ 256 #else # if NR_CPUS >= 32 Index: linux-2.6-tip/net/ipv6/netfilter/ip6_tables.c =================================================================== --- linux-2.6-tip.orig/net/ipv6/netfilter/ip6_tables.c +++ linux-2.6-tip/net/ipv6/netfilter/ip6_tables.c @@ -376,7 +376,7 @@ ip6t_do_table(struct sk_buff *skb, read_lock_bh(&table->lock); IP_NF_ASSERT(table->valid_hooks & (1 << hook)); private = table->private; - table_base = (void *)private->entries[smp_processor_id()]; + table_base = (void *)private->entries[raw_smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); /* For return from builtin chain */ Index: linux-2.6-tip/net/sched/sch_generic.c =================================================================== --- linux-2.6-tip.orig/net/sched/sch_generic.c +++ linux-2.6-tip/net/sched/sch_generic.c @@ -12,6 +12,7 @@ */ #include +#include #include #include #include @@ -24,6 +25,7 @@ #include #include #include +#include #include /* Main transmission queue. 
*/ @@ -78,7 +80,7 @@ static inline int handle_dev_cpu_collisi { int ret; - if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) { + if (unlikely(dev_queue->xmit_lock_owner == raw_smp_processor_id())) { /* * Same CPU holding the lock. It may be a transient * configuration error, when hard_start_xmit() recurses. We @@ -141,7 +143,7 @@ static inline int qdisc_restart(struct Q dev = qdisc_dev(q); txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - HARD_TX_LOCK(dev, txq, smp_processor_id()); + HARD_TX_LOCK(dev, txq, raw_smp_processor_id()); if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq)) ret = dev_hard_start_xmit(skb, dev, txq); @@ -691,9 +693,12 @@ void dev_deactivate(struct net_device *d /* Wait for outstanding qdisc-less dev_queue_xmit calls. */ synchronize_rcu(); - /* Wait for outstanding qdisc_run calls. */ + /* + * Wait for outstanding qdisc_run calls. + * TODO: shouldnt this be wakeup-based, instead of polling it? + */ while (some_qdisc_is_busy(dev)) - yield(); + msleep(1); } static void dev_init_scheduler_queue(struct net_device *dev, patches/preempt-realtime-net-softirq-fixups.patch0000664000076400007640000000234511150327144021321 0ustar tglxtglxSubject: NOHZ: local_softirq_pending with tickless From: Mikulas Patocka On one of my machines with tickless kernel and plip I get messages : NOHZ: local_softirq_pending 08 always when using plip (on other machine with tickless kernel and plip I get no errors). Thebug happens both on 2.6.21 and 2.6.22-rc1 This patch fixes that. Note that plip calls netif_rx neither from hardware interrupt nor from ksoftirqd, so there is no one who would wake ksoftirqd then. netif_tx calls only __raise_softirq_irqoff(NET_RX_SOFTIRQ), which sets softirq bit, but doesn't wake ksoftirqd. Mikulas Signed-off-by: Mikulas Patocka Removed the remaining users of __raise_softirq_irqoff() as well. tglx Signed-off-by: Ingo Molnar --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/net/core/dev.c =================================================================== --- linux-2.6-tip.orig/net/core/dev.c +++ linux-2.6-tip/net/core/dev.c @@ -2793,7 +2793,7 @@ out: softnet_break: __get_cpu_var(netdev_rx_stat).time_squeeze++; - __raise_softirq_irqoff(NET_RX_SOFTIRQ); + raise_softirq_irqoff(NET_RX_SOFTIRQ); goto out; } patches/dev-queue-xmit-preempt-fix.patch0000664000076400007640000002052211156214140017367 0ustar tglxtglxSubject: Preemption problem in kernel RT Patch] From: Ingo Molnar Date: Thu, 3 Jan 2008 09:22:03 +0100 ----- Forwarded message from mbeauch ----- Date: Wed, 02 Jan 2008 20:27:09 -0500 From: mbeauch To: mingo@elte.hu Here's the updated patch: Changed the real-time patch code to detect recursive calls to dev_queue_xmit and drop the packet when detected. 
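The detection itself is visible in the hunks below: the xmit lock owner is tracked as a task pointer instead of a CPU number, so the "do we already hold this lock?" test stays valid even when the -rt xmit path is preempted or migrated between CPUs. A condensed sketch of the idea (illustrative structure and names only, not the actual netdev_queue layout):

/*
 * Ownership is recorded per task rather than per CPU, so the recursion
 * check keeps working across preemption and CPU migration on -rt.
 */
#include <linux/sched.h>
#include <linux/spinlock.h>

struct xmit_lock {
	spinlock_t		lock;
	struct task_struct	*owner;		/* NULL when not held */
};

static inline int xmit_lock_recursion(struct xmit_lock *xl)
{
	return xl->owner == current;		/* we already hold it */
}

static inline void xmit_lock(struct xmit_lock *xl)
{
	spin_lock(&xl->lock);
	xl->owner = current;
}

static inline void xmit_unlock(struct xmit_lock *xl)
{
	xl->owner = NULL;
	spin_unlock(&xl->lock);
}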
Signed-off-by: Mark Beauchemin [ ported to latest upstream ] Signed-off-by: Ingo Molnar --- drivers/net/bnx2.c | 2 +- drivers/net/mv643xx_eth.c | 6 +++--- drivers/net/niu.c | 2 +- include/linux/netdevice.h | 30 +++++++++++++++--------------- net/core/dev.c | 10 +++------- net/core/netpoll.c | 2 +- net/sched/sch_generic.c | 4 ++-- 7 files changed, 26 insertions(+), 30 deletions(-) Index: linux-2.6-tip/drivers/net/bnx2.c =================================================================== --- linux-2.6-tip.orig/drivers/net/bnx2.c +++ linux-2.6-tip/drivers/net/bnx2.c @@ -2661,7 +2661,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2 if (unlikely(netif_tx_queue_stopped(txq)) && (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) { - __netif_tx_lock(txq, smp_processor_id()); + __netif_tx_lock(txq, (void *)current); if ((netif_tx_queue_stopped(txq)) && (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) netif_tx_wake_queue(txq); Index: linux-2.6-tip/drivers/net/mv643xx_eth.c =================================================================== --- linux-2.6-tip.orig/drivers/net/mv643xx_eth.c +++ linux-2.6-tip/drivers/net/mv643xx_eth.c @@ -484,7 +484,7 @@ static void txq_maybe_wake(struct tx_que struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); if (netif_tx_queue_stopped(nq)) { - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock(nq, (void *)current); if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) netif_tx_wake_queue(nq); __netif_tx_unlock(nq); @@ -838,7 +838,7 @@ static void txq_kick(struct tx_queue *tx u32 hw_desc_ptr; u32 expected_ptr; - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock(nq, (void *)current); if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) goto out; @@ -862,7 +862,7 @@ static int txq_reclaim(struct tx_queue * struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); int reclaimed; - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock(nq, (void *)current); reclaimed = 0; while (reclaimed < budget && txq->tx_desc_count > 0) { Index: linux-2.6-tip/drivers/net/niu.c =================================================================== --- linux-2.6-tip.orig/drivers/net/niu.c +++ linux-2.6-tip/drivers/net/niu.c @@ -3519,7 +3519,7 @@ static void niu_tx_work(struct niu *np, out: if (unlikely(netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { - __netif_tx_lock(txq, smp_processor_id()); + __netif_tx_lock(txq, (void *)current); if (netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) netif_tx_wake_queue(txq); Index: linux-2.6-tip/include/linux/netdevice.h =================================================================== --- linux-2.6-tip.orig/include/linux/netdevice.h +++ linux-2.6-tip/include/linux/netdevice.h @@ -439,7 +439,7 @@ struct netdev_queue { struct Qdisc *qdisc; unsigned long state; spinlock_t _xmit_lock; - int xmit_lock_owner; + void *xmit_lock_owner; struct Qdisc *qdisc_sleeping; } ____cacheline_aligned_in_smp; @@ -1625,35 +1625,35 @@ static inline void netif_rx_complete(str napi_complete(napi); } -static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) +static inline void __netif_tx_lock(struct netdev_queue *txq, void *curr) { spin_lock(&txq->_xmit_lock); - txq->xmit_lock_owner = cpu; + txq->xmit_lock_owner = curr; } static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); - txq->xmit_lock_owner = raw_smp_processor_id(); + txq->xmit_lock_owner = (void *)current; } static inline int __netif_tx_trylock(struct 
netdev_queue *txq) { int ok = spin_trylock(&txq->_xmit_lock); if (likely(ok)) - txq->xmit_lock_owner = raw_smp_processor_id(); + txq->xmit_lock_owner = (void *)current; return ok; } static inline void __netif_tx_unlock(struct netdev_queue *txq) { - txq->xmit_lock_owner = -1; + txq->xmit_lock_owner = (void *)-1; spin_unlock(&txq->_xmit_lock); } static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) { - txq->xmit_lock_owner = -1; + txq->xmit_lock_owner = (void *)-1; spin_unlock_bh(&txq->_xmit_lock); } @@ -1666,10 +1666,10 @@ static inline void __netif_tx_unlock_bh( static inline void netif_tx_lock(struct net_device *dev) { unsigned int i; - int cpu; + void *curr; spin_lock(&dev->tx_global_lock); - cpu = raw_smp_processor_id(); + curr = (void *)current; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); @@ -1679,7 +1679,7 @@ static inline void netif_tx_lock(struct * the ->hard_start_xmit() handler and already * checked the frozen bit. */ - __netif_tx_lock(txq, cpu); + __netif_tx_lock(txq, curr); set_bit(__QUEUE_STATE_FROZEN, &txq->state); __netif_tx_unlock(txq); } @@ -1715,9 +1715,9 @@ static inline void netif_tx_unlock_bh(st local_bh_enable(); } -#define HARD_TX_LOCK(dev, txq, cpu) { \ +#define HARD_TX_LOCK(dev, txq, curr) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ - __netif_tx_lock(txq, cpu); \ + __netif_tx_lock(txq, curr); \ } \ } @@ -1730,14 +1730,14 @@ static inline void netif_tx_unlock_bh(st static inline void netif_tx_disable(struct net_device *dev) { unsigned int i; - int cpu; + void *curr; local_bh_disable(); - cpu = raw_smp_processor_id(); + curr = (void *)current; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - __netif_tx_lock(txq, cpu); + __netif_tx_lock(txq, curr); netif_tx_stop_queue(txq); __netif_tx_unlock(txq); } Index: linux-2.6-tip/net/core/dev.c =================================================================== --- linux-2.6-tip.orig/net/core/dev.c +++ linux-2.6-tip/net/core/dev.c @@ -1884,13 +1884,9 @@ gso: /* * No need to check for recursion with threaded interrupts: */ -#ifdef CONFIG_PREEMPT_RT - if (1) { -#else - if (txq->xmit_lock_owner != cpu) { -#endif + if (txq->xmit_lock_owner != (void *)current) { - HARD_TX_LOCK(dev, txq, cpu); + HARD_TX_LOCK(dev, txq, (void *)current); if (!netif_tx_queue_stopped(txq)) { rc = 0; @@ -4264,7 +4260,7 @@ static void __netdev_init_queue_locks_on { spin_lock_init(&dev_queue->_xmit_lock); netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type); - dev_queue->xmit_lock_owner = -1; + dev_queue->xmit_lock_owner = (void *)-1; } static void netdev_init_queue_locks(struct net_device *dev) Index: linux-2.6-tip/net/core/netpoll.c =================================================================== --- linux-2.6-tip.orig/net/core/netpoll.c +++ linux-2.6-tip/net/core/netpoll.c @@ -69,7 +69,7 @@ static void queue_process(struct work_st txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); local_irq_save_nort(flags); - __netif_tx_lock(txq, smp_processor_id()); + __netif_tx_lock(txq, (void *)current); if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq) || ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) { Index: linux-2.6-tip/net/sched/sch_generic.c =================================================================== --- linux-2.6-tip.orig/net/sched/sch_generic.c +++ linux-2.6-tip/net/sched/sch_generic.c @@ -80,7 +80,7 @@ static inline int handle_dev_cpu_collisi { int ret; - if 
(unlikely(dev_queue->xmit_lock_owner == raw_smp_processor_id())) { + if (unlikely(dev_queue->xmit_lock_owner == (void *)current)) { /* * Same CPU holding the lock. It may be a transient * configuration error, when hard_start_xmit() recurses. We @@ -143,7 +143,7 @@ static inline int qdisc_restart(struct Q dev = qdisc_dev(q); txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - HARD_TX_LOCK(dev, txq, raw_smp_processor_id()); + HARD_TX_LOCK(dev, txq, (void *)current); if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq)) ret = dev_hard_start_xmit(skb, dev, txq); patches/net-xmit-lock-owner-cleanup.patch0000664000076400007640000001524611156214140017531 0ustar tglxtglxSubject: net: xmit lock owner cleanup From: Ingo Molnar Date: Sun Feb 08 08:17:14 CET 2009 - __netif_tx_lock() always passes in 'current' as the lock owner, so eliminate this parameter. - likewise for HARD_TX_LOCK() Signed-off-by: Ingo Molnar --- drivers/net/bnx2.c | 2 +- drivers/net/mv643xx_eth.c | 6 +++--- drivers/net/niu.c | 2 +- include/linux/netdevice.h | 24 ++++++++++++++---------- net/core/dev.c | 4 ++-- net/core/netpoll.c | 2 +- net/sched/sch_generic.c | 4 ++-- 7 files changed, 24 insertions(+), 20 deletions(-) Index: linux-2.6-tip/drivers/net/bnx2.c =================================================================== --- linux-2.6-tip.orig/drivers/net/bnx2.c +++ linux-2.6-tip/drivers/net/bnx2.c @@ -2661,7 +2661,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2 if (unlikely(netif_tx_queue_stopped(txq)) && (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) { - __netif_tx_lock(txq, (void *)current); + __netif_tx_lock(txq); if ((netif_tx_queue_stopped(txq)) && (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) netif_tx_wake_queue(txq); Index: linux-2.6-tip/drivers/net/mv643xx_eth.c =================================================================== --- linux-2.6-tip.orig/drivers/net/mv643xx_eth.c +++ linux-2.6-tip/drivers/net/mv643xx_eth.c @@ -484,7 +484,7 @@ static void txq_maybe_wake(struct tx_que struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); if (netif_tx_queue_stopped(nq)) { - __netif_tx_lock(nq, (void *)current); + __netif_tx_lock(nq); if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) netif_tx_wake_queue(nq); __netif_tx_unlock(nq); @@ -838,7 +838,7 @@ static void txq_kick(struct tx_queue *tx u32 hw_desc_ptr; u32 expected_ptr; - __netif_tx_lock(nq, (void *)current); + __netif_tx_lock(nq); if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) goto out; @@ -862,7 +862,7 @@ static int txq_reclaim(struct tx_queue * struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); int reclaimed; - __netif_tx_lock(nq, (void *)current); + __netif_tx_lock(nq); reclaimed = 0; while (reclaimed < budget && txq->tx_desc_count > 0) { Index: linux-2.6-tip/drivers/net/niu.c =================================================================== --- linux-2.6-tip.orig/drivers/net/niu.c +++ linux-2.6-tip/drivers/net/niu.c @@ -3519,7 +3519,7 @@ static void niu_tx_work(struct niu *np, out: if (unlikely(netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { - __netif_tx_lock(txq, (void *)current); + __netif_tx_lock(txq); if (netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) netif_tx_wake_queue(txq); Index: linux-2.6-tip/include/linux/netdevice.h =================================================================== --- linux-2.6-tip.orig/include/linux/netdevice.h +++ linux-2.6-tip/include/linux/netdevice.h @@ -1625,10 +1625,18 @@ static inline void 
netif_rx_complete(str napi_complete(napi); } -static inline void __netif_tx_lock(struct netdev_queue *txq, void *curr) +static inline void __netif_tx_lock(struct netdev_queue *txq) { spin_lock(&txq->_xmit_lock); - txq->xmit_lock_owner = curr; + txq->xmit_lock_owner = (void *)current; +} + +/* + * Do we hold the xmit_lock already? + */ +static inline int netif_tx_lock_recursion(struct netdev_queue *txq) +{ + return txq->xmit_lock_owner == (void *)current; } static inline void __netif_tx_lock_bh(struct netdev_queue *txq) @@ -1666,10 +1674,8 @@ static inline void __netif_tx_unlock_bh( static inline void netif_tx_lock(struct net_device *dev) { unsigned int i; - void *curr; spin_lock(&dev->tx_global_lock); - curr = (void *)current; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); @@ -1679,7 +1685,7 @@ static inline void netif_tx_lock(struct * the ->hard_start_xmit() handler and already * checked the frozen bit. */ - __netif_tx_lock(txq, curr); + __netif_tx_lock(txq); set_bit(__QUEUE_STATE_FROZEN, &txq->state); __netif_tx_unlock(txq); } @@ -1715,9 +1721,9 @@ static inline void netif_tx_unlock_bh(st local_bh_enable(); } -#define HARD_TX_LOCK(dev, txq, curr) { \ +#define HARD_TX_LOCK(dev, txq) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ - __netif_tx_lock(txq, curr); \ + __netif_tx_lock(txq); \ } \ } @@ -1730,14 +1736,12 @@ static inline void netif_tx_unlock_bh(st static inline void netif_tx_disable(struct net_device *dev) { unsigned int i; - void *curr; local_bh_disable(); - curr = (void *)current; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - __netif_tx_lock(txq, curr); + __netif_tx_lock(txq); netif_tx_stop_queue(txq); __netif_tx_unlock(txq); } Index: linux-2.6-tip/net/core/dev.c =================================================================== --- linux-2.6-tip.orig/net/core/dev.c +++ linux-2.6-tip/net/core/dev.c @@ -1884,9 +1884,9 @@ gso: /* * No need to check for recursion with threaded interrupts: */ - if (txq->xmit_lock_owner != (void *)current) { + if (!netif_tx_lock_recursion(txq)) { - HARD_TX_LOCK(dev, txq, (void *)current); + HARD_TX_LOCK(dev, txq); if (!netif_tx_queue_stopped(txq)) { rc = 0; Index: linux-2.6-tip/net/core/netpoll.c =================================================================== --- linux-2.6-tip.orig/net/core/netpoll.c +++ linux-2.6-tip/net/core/netpoll.c @@ -69,7 +69,7 @@ static void queue_process(struct work_st txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); local_irq_save_nort(flags); - __netif_tx_lock(txq, (void *)current); + __netif_tx_lock(txq); if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq) || ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) { Index: linux-2.6-tip/net/sched/sch_generic.c =================================================================== --- linux-2.6-tip.orig/net/sched/sch_generic.c +++ linux-2.6-tip/net/sched/sch_generic.c @@ -80,7 +80,7 @@ static inline int handle_dev_cpu_collisi { int ret; - if (unlikely(dev_queue->xmit_lock_owner == (void *)current)) { + if (unlikely(netif_tx_lock_recursion(dev_queue))) { /* * Same CPU holding the lock. It may be a transient * configuration error, when hard_start_xmit() recurses. 
We @@ -143,7 +143,7 @@ static inline int qdisc_restart(struct Q dev = qdisc_dev(q); txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - HARD_TX_LOCK(dev, txq, (void *)current); + HARD_TX_LOCK(dev, txq); if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq)) ret = dev_hard_start_xmit(skb, dev, txq); patches/preempt-realtime-cleanup-dev-queue-xmit.patch0000664000076400007640000000457511150327144022045 0ustar tglxtglxSubject: net: clean up dev_queue_xmit() code flow From: Ingo Molnar Date: Sun Feb 08 08:39:41 CET 2009 Flatten out the dev_queue_xmit() code flow. This keeps the fall-through fast-path free for the compiler, and also helps code readability. Signed-off-by: Ingo Molnar --- net/core/dev.c | 63 +++++++++++++++++++++++++++++++-------------------------- 1 file changed, 35 insertions(+), 28 deletions(-) Index: linux-2.6-tip/net/core/dev.c =================================================================== --- linux-2.6-tip.orig/net/core/dev.c +++ linux-2.6-tip/net/core/dev.c @@ -1878,45 +1878,52 @@ gso: Check this and shot the lock. It is not prone from deadlocks. Either shot noqueue qdisc, it is even simpler 8) */ - if (dev->flags & IFF_UP) { - int cpu = raw_smp_processor_id(); /* ok because BHs are off */ + if (!(dev->flags & IFF_UP)) + goto err; - /* - * No need to check for recursion with threaded interrupts: - */ - if (!netif_tx_lock_recursion(txq)) { + /* Recursion is detected! It is possible, unfortunately: */ + if (netif_tx_lock_recursion(txq)) + goto err_recursion; - HARD_TX_LOCK(dev, txq); + HARD_TX_LOCK(dev, txq); - if (!netif_tx_queue_stopped(txq)) { - rc = 0; - if (!dev_hard_start_xmit(skb, dev, txq)) { - HARD_TX_UNLOCK(dev, txq); - goto out; - } - } - HARD_TX_UNLOCK(dev, txq); - if (net_ratelimit()) - printk(KERN_CRIT "Virtual device %s asks to " - "queue packet!\n", dev->name); - } else { - /* Recursion is detected! 
It is possible, - * unfortunately */ - if (net_ratelimit()) - printk(KERN_CRIT "Dead loop on virtual device " - "%s, fix it urgently!\n", dev->name); - } + if (netif_tx_queue_stopped(txq)) + goto err_tx_unlock; + + if (dev_hard_start_xmit(skb, dev, txq)) + goto err_tx_unlock; + + rc = 0; + HARD_TX_UNLOCK(dev, txq); + +out: + rcu_read_unlock_bh(); + return rc; + +err_recursion: + if (net_ratelimit()) { + printk(KERN_CRIT + "Dead loop on virtual device %s, fix it urgently!\n", + dev->name); } + goto err; +err_tx_unlock: + HARD_TX_UNLOCK(dev, txq); + + if (net_ratelimit()) { + printk(KERN_CRIT "Virtual device %s asks to queue packet!\n", + dev->name); + } + /* Fall through: */ + +err: rc = -ENETDOWN; rcu_read_unlock_bh(); out_kfree_skb: kfree_skb(skb); return rc; -out: - rcu_read_unlock_bh(); - return rc; } patches/preempt-realtime-loopback.patch0000664000076400007640000000111111150327144017312 0ustar tglxtglxSubject: preempt: realtime loopback From: Ingo Molnar Date: Wed Feb 04 00:02:45 CET 2009 Signed-off-by: Ingo Molnar --- drivers/net/loopback.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/drivers/net/loopback.c =================================================================== --- linux-2.6-tip.orig/drivers/net/loopback.c +++ linux-2.6-tip/drivers/net/loopback.c @@ -82,7 +82,7 @@ static int loopback_xmit(struct sk_buff lb_stats->packets++; put_cpu(); - netif_rx(skb); + netif_rx_ni(skb); return 0; } patches/preempt-realtime-fs-block.patch0000664000076400007640000001505011150327144017227 0ustar tglxtglxSubject: preempt: realtime fs block From: Ingo Molnar Date: Wed Feb 04 00:02:53 CET 2009 Signed-off-by: Ingo Molnar --- block/blk-core.c | 6 +++--- fs/aio.c | 4 +++- fs/dcache.c | 5 +++-- fs/exec.c | 5 ++++- fs/file.c | 5 +++-- fs/notify/dnotify/dnotify.c | 2 +- fs/pipe.c | 12 ++++++++++++ fs/proc/task_mmu.c | 4 +++- fs/xfs/linux-2.6/mrlock.h | 2 +- fs/xfs/xfs_mount.h | 2 +- 10 files changed, 34 insertions(+), 13 deletions(-) Index: linux-2.6-tip/block/blk-core.c =================================================================== --- linux-2.6-tip.orig/block/blk-core.c +++ linux-2.6-tip/block/blk-core.c @@ -212,7 +212,7 @@ EXPORT_SYMBOL(blk_dump_rq_flags); */ void blk_plug_device(struct request_queue *q) { - WARN_ON(!irqs_disabled()); + WARN_ON_NONRT(!irqs_disabled()); /* * don't plug a stopped queue, it must be paired with blk_start_queue() @@ -252,7 +252,7 @@ EXPORT_SYMBOL(blk_plug_device_unlocked); */ int blk_remove_plug(struct request_queue *q) { - WARN_ON(!irqs_disabled()); + WARN_ON_NONRT(!irqs_disabled()); if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q)) return 0; @@ -362,7 +362,7 @@ static void blk_invoke_request_fn(struct **/ void blk_start_queue(struct request_queue *q) { - WARN_ON(!irqs_disabled()); + WARN_ON_NONRT(!irqs_disabled()); queue_flag_clear(QUEUE_FLAG_STOPPED, q); blk_invoke_request_fn(q); Index: linux-2.6-tip/fs/aio.c =================================================================== --- linux-2.6-tip.orig/fs/aio.c +++ linux-2.6-tip/fs/aio.c @@ -605,9 +605,11 @@ static void use_mm(struct mm_struct *mm) task_lock(tsk); active_mm = tsk->active_mm; atomic_inc(&mm->mm_count); + local_irq_disable(); // FIXME + switch_mm(active_mm, mm, tsk); tsk->mm = mm; tsk->active_mm = mm; - switch_mm(active_mm, mm, tsk); + local_irq_enable(); task_unlock(tsk); mmdrop(active_mm); Index: linux-2.6-tip/fs/dcache.c =================================================================== --- linux-2.6-tip.orig/fs/dcache.c +++ linux-2.6-tip/fs/dcache.c @@ -726,8 
+726,9 @@ void shrink_dcache_for_umount(struct sup { struct dentry *dentry; - if (down_read_trylock(&sb->s_umount)) - BUG(); +// -rt: this might succeed there ... +// if (down_read_trylock(&sb->s_umount)) +// BUG(); dentry = sb->s_root; sb->s_root = NULL; Index: linux-2.6-tip/fs/exec.c =================================================================== --- linux-2.6-tip.orig/fs/exec.c +++ linux-2.6-tip/fs/exec.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -739,10 +740,12 @@ static int exec_mmap(struct mm_struct *m } } task_lock(tsk); + local_irq_disable(); active_mm = tsk->active_mm; + activate_mm(active_mm, mm); tsk->mm = mm; tsk->active_mm = mm; - activate_mm(active_mm, mm); + local_irq_enable(); task_unlock(tsk); arch_pick_mmap_layout(mm); if (old_mm) { Index: linux-2.6-tip/fs/file.c =================================================================== --- linux-2.6-tip.orig/fs/file.c +++ linux-2.6-tip/fs/file.c @@ -102,14 +102,15 @@ void free_fdtable_rcu(struct rcu_head *r kfree(fdt->open_fds); kfree(fdt); } else { - fddef = &get_cpu_var(fdtable_defer_list); + + fddef = &per_cpu(fdtable_defer_list, raw_smp_processor_id()); + spin_lock(&fddef->lock); fdt->next = fddef->next; fddef->next = fdt; /* vmallocs are handled from the workqueue context */ schedule_work(&fddef->wq); spin_unlock(&fddef->lock); - put_cpu_var(fdtable_defer_list); } } Index: linux-2.6-tip/fs/notify/dnotify/dnotify.c =================================================================== --- linux-2.6-tip.orig/fs/notify/dnotify/dnotify.c +++ linux-2.6-tip/fs/notify/dnotify/dnotify.c @@ -170,7 +170,7 @@ void dnotify_parent(struct dentry *dentr spin_lock(&dentry->d_lock); parent = dentry->d_parent; - if (parent->d_inode->i_dnotify_mask & event) { + if (unlikely(parent->d_inode->i_dnotify_mask & event)) { dget(parent); spin_unlock(&dentry->d_lock); __inode_dir_notify(parent->d_inode, event); Index: linux-2.6-tip/fs/pipe.c =================================================================== --- linux-2.6-tip.orig/fs/pipe.c +++ linux-2.6-tip/fs/pipe.c @@ -386,8 +386,14 @@ redo: wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } + /* + * Hack: we turn off atime updates for -RT kernels. + * Who uses them on pipes anyway? + */ +#ifndef CONFIG_PREEMPT_RT if (ret > 0) file_accessed(filp); +#endif return ret; } @@ -559,8 +565,14 @@ out: wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } + /* + * Hack: we turn off atime updates for -RT kernels. + * Who uses them on pipes anyway? 
+ */ +#ifndef CONFIG_PREEMPT_RT if (ret > 0) file_update_time(filp); +#endif return ret; } Index: linux-2.6-tip/fs/proc/task_mmu.c =================================================================== --- linux-2.6-tip.orig/fs/proc/task_mmu.c +++ linux-2.6-tip/fs/proc/task_mmu.c @@ -137,8 +137,10 @@ static void *m_start(struct seq_file *m, vma = NULL; if ((unsigned long)l < mm->map_count) { vma = mm->mmap; - while (l-- && vma) + while (l-- && vma) { vma = vma->vm_next; + cond_resched(); + } goto out; } Index: linux-2.6-tip/fs/xfs/linux-2.6/mrlock.h =================================================================== --- linux-2.6-tip.orig/fs/xfs/linux-2.6/mrlock.h +++ linux-2.6-tip/fs/xfs/linux-2.6/mrlock.h @@ -21,7 +21,7 @@ #include typedef struct { - struct rw_semaphore mr_lock; + struct compat_rw_semaphore mr_lock; #ifdef DEBUG int mr_writer; #endif Index: linux-2.6-tip/fs/xfs/xfs_mount.h =================================================================== --- linux-2.6-tip.orig/fs/xfs/xfs_mount.h +++ linux-2.6-tip/fs/xfs/xfs_mount.h @@ -275,7 +275,7 @@ typedef struct xfs_mount { uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */ uint m_in_maxlevels; /* XFS_IN_MAXLEVELS */ struct xfs_perag *m_perag; /* per-ag accounting info */ - struct rw_semaphore m_peraglock; /* lock for m_perag (pointer) */ + struct compat_rw_semaphore m_peraglock; /* lock for m_perag (pointer) */ struct mutex m_growlock; /* growfs mutex */ int m_fixedfsid[2]; /* unchanged for life of FS */ uint m_dmevmask; /* DMI events for this FS */ patches/preempt-realtime-acpi.patch0000664000076400007640000001260511150327745016455 0ustar tglxtglxSubject: preempt: realtime acpi From: Ingo Molnar Date: Wed Feb 04 00:02:52 CET 2009 Signed-off-by: Ingo Molnar --- drivers/acpi/acpica/acglobal.h | 7 ++++++- drivers/acpi/acpica/hwregs.c | 4 ++-- drivers/acpi/acpica/hwxface.c | 8 ++++---- drivers/acpi/acpica/utmutex.c | 2 +- drivers/acpi/ec.c | 15 ++++++++++++++- drivers/acpi/processor_idle.c | 2 +- include/acpi/acpiosxf.h | 2 +- 7 files changed, 29 insertions(+), 11 deletions(-) Index: linux-2.6-tip/drivers/acpi/acpica/acglobal.h =================================================================== --- linux-2.6-tip.orig/drivers/acpi/acpica/acglobal.h +++ linux-2.6-tip/drivers/acpi/acpica/acglobal.h @@ -190,7 +190,12 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pres * interrupt level */ ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */ -ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ + +/* + * Need to be raw because it might be used in acpi_processor_idle(): + */ +ACPI_EXTERN raw_spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ + #define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock #define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock Index: linux-2.6-tip/drivers/acpi/acpica/hwregs.c =================================================================== --- linux-2.6-tip.orig/drivers/acpi/acpica/hwregs.c +++ linux-2.6-tip/drivers/acpi/acpica/hwregs.c @@ -74,7 +74,7 @@ acpi_status acpi_hw_clear_acpi_status(vo ACPI_BITMASK_ALL_FIXED_STATUS, (u16) acpi_gbl_FADT.xpm1a_event_block.address)); - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, ACPI_BITMASK_ALL_FIXED_STATUS); @@ -97,7 +97,7 @@ acpi_status acpi_hw_clear_acpi_status(vo status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL); unlock_and_exit: - 
acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); return_ACPI_STATUS(status); } Index: linux-2.6-tip/drivers/acpi/acpica/hwxface.c =================================================================== --- linux-2.6-tip.orig/drivers/acpi/acpica/hwxface.c +++ linux-2.6-tip/drivers/acpi/acpica/hwxface.c @@ -313,9 +313,9 @@ acpi_status acpi_get_register(u32 regist acpi_status status; acpi_cpu_flags flags; - flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + spin_lock_irqsave(acpi_gbl_hardware_lock, flags); status = acpi_get_register_unlocked(register_id, return_value); - acpi_os_release_lock(acpi_gbl_hardware_lock, flags); + spin_unlock_irqrestore(acpi_gbl_hardware_lock, flags); return (status); } @@ -353,7 +353,7 @@ acpi_status acpi_set_register(u32 regist return_ACPI_STATUS(AE_BAD_PARAMETER); } - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); /* Always do a register read first so we can insert the new bits */ @@ -458,7 +458,7 @@ acpi_status acpi_set_register(u32 regist unlock_and_exit: - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); /* Normalize the value that was read */ Index: linux-2.6-tip/drivers/acpi/acpica/utmutex.c =================================================================== --- linux-2.6-tip.orig/drivers/acpi/acpica/utmutex.c +++ linux-2.6-tip/drivers/acpi/acpica/utmutex.c @@ -117,7 +117,7 @@ void acpi_ut_mutex_terminate(void) /* Delete the spinlocks */ acpi_os_delete_lock(acpi_gbl_gpe_lock); - acpi_os_delete_lock(acpi_gbl_hardware_lock); +// acpi_os_delete_lock(acpi_gbl_hardware_lock); return_VOID; } Index: linux-2.6-tip/drivers/acpi/ec.c =================================================================== --- linux-2.6-tip.orig/drivers/acpi/ec.c +++ linux-2.6-tip/drivers/acpi/ec.c @@ -563,8 +563,21 @@ static u32 acpi_ec_gpe_handler(void *dat if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) { gpe_transaction(ec, status); if (ec_transaction_done(ec) && - (status & ACPI_EC_FLAG_IBF) == 0) + (status & ACPI_EC_FLAG_IBF) == 0) { +#if 0 wake_up(&ec->wait); +#else + // hack ... 
+ if (waitqueue_active(&ec->wait)) { + struct task_struct *task; + + task = list_entry(ec->wait.task_list.next, + wait_queue_t, task_list)->private; + if (task) + wake_up_process(task); + } +#endif + } } ec_check_sci(ec, status); Index: linux-2.6-tip/drivers/acpi/processor_idle.c =================================================================== --- linux-2.6-tip.orig/drivers/acpi/processor_idle.c +++ linux-2.6-tip/drivers/acpi/processor_idle.c @@ -956,7 +956,7 @@ static int acpi_idle_enter_simple(struct } static int c3_cpu_count; -static DEFINE_SPINLOCK(c3_lock); +static DEFINE_RAW_SPINLOCK(c3_lock); /** * acpi_idle_enter_bm - enters C3 with proper BM handling Index: linux-2.6-tip/include/acpi/acpiosxf.h =================================================================== --- linux-2.6-tip.orig/include/acpi/acpiosxf.h +++ linux-2.6-tip/include/acpi/acpiosxf.h @@ -61,7 +61,7 @@ typedef enum { OSL_EC_BURST_HANDLER } acpi_execute_type; -#define ACPI_NO_UNIT_LIMIT ((u32) -1) +#define ACPI_NO_UNIT_LIMIT (INT_MAX/2) #define ACPI_MUTEX_SEM 1 /* Functions for acpi_os_signal */ patches/preempt-realtime-ipc.patch0000664000076400007640000000622311150327144016304 0ustar tglxtglxSubject: preempt: realtime ipc From: Ingo Molnar Date: Wed Feb 04 00:02:52 CET 2009 Signed-off-by: Ingo Molnar --- ipc/mqueue.c | 5 +++++ ipc/msg.c | 25 +++++++++++++++++++------ ipc/sem.c | 6 ++++++ 3 files changed, 30 insertions(+), 6 deletions(-) Index: linux-2.6-tip/ipc/mqueue.c =================================================================== --- linux-2.6-tip.orig/ipc/mqueue.c +++ linux-2.6-tip/ipc/mqueue.c @@ -787,12 +787,17 @@ static inline void pipelined_send(struct struct msg_msg *message, struct ext_wait_queue *receiver) { + /* + * Keep them in one critical section for PREEMPT_RT: + */ + preempt_disable(); receiver->msg = message; list_del(&receiver->list); receiver->state = STATE_PENDING; wake_up_process(receiver->task); smp_wmb(); receiver->state = STATE_READY; + preempt_enable(); } /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() Index: linux-2.6-tip/ipc/msg.c =================================================================== --- linux-2.6-tip.orig/ipc/msg.c +++ linux-2.6-tip/ipc/msg.c @@ -259,12 +259,19 @@ static void expunge_all(struct msg_queue while (tmp != &msq->q_receivers) { struct msg_receiver *msr; + /* + * Make sure that the wakeup doesnt preempt + * this CPU prematurely. (on PREEMPT_RT) + */ + preempt_disable(); + msr = list_entry(tmp, struct msg_receiver, r_list); tmp = tmp->next; msr->r_msg = NULL; - wake_up_process(msr->r_tsk); - smp_mb(); + wake_up_process(msr->r_tsk); /* serializes */ msr->r_msg = ERR_PTR(res); + + preempt_enable(); } } @@ -611,22 +618,28 @@ static inline int pipelined_send(struct !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { + /* + * Make sure that the wakeup doesnt preempt + * this CPU prematurely. 
(on PREEMPT_RT) + */ + preempt_disable(); + list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { msr->r_msg = NULL; - wake_up_process(msr->r_tsk); - smp_mb(); + wake_up_process(msr->r_tsk); /* serializes */ msr->r_msg = ERR_PTR(-E2BIG); } else { msr->r_msg = NULL; msq->q_lrpid = task_pid_vnr(msr->r_tsk); msq->q_rtime = get_seconds(); - wake_up_process(msr->r_tsk); - smp_mb(); + wake_up_process(msr->r_tsk); /* serializes */ msr->r_msg = msg; + preempt_enable(); return 1; } + preempt_enable(); } } return 0; Index: linux-2.6-tip/ipc/sem.c =================================================================== --- linux-2.6-tip.orig/ipc/sem.c +++ linux-2.6-tip/ipc/sem.c @@ -415,6 +415,11 @@ static void update_queue (struct sem_arr struct sem_queue *n; /* + * make sure that the wakeup doesnt preempt + * _this_ cpu prematurely. (on preempt_rt) + */ + preempt_disable(); + /* * Continue scanning. The next operation * that must be checked depends on the type of the * completed operation: @@ -450,6 +455,7 @@ static void update_queue (struct sem_arr */ smp_wmb(); q->status = error; + preempt_enable(); q = n; } else { q = list_entry(q->list.next, struct sem_queue, list); patches/preempt-realtime-mm.patch0000664000076400007640000001570011156214137016145 0ustar tglxtglxSubject: preempt: realtime mm From: Ingo Molnar Date: Wed Feb 04 00:02:51 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/pagevec.h | 2 +- include/linux/vmstat.h | 10 ++++++++++ mm/bounce.c | 4 ++-- mm/memory.c | 7 +++++-- mm/mmap.c | 10 ++++++++-- mm/vmscan.c | 10 ++++++++-- mm/vmstat.c | 38 ++++++++++++++++++++++++++++++++------ 7 files changed, 66 insertions(+), 15 deletions(-) Index: linux-2.6-tip/include/linux/pagevec.h =================================================================== --- linux-2.6-tip.orig/include/linux/pagevec.h +++ linux-2.6-tip/include/linux/pagevec.h @@ -9,7 +9,7 @@ #define _LINUX_PAGEVEC_H /* 14 pointers + two long's align the pagevec structure to a power of two */ -#define PAGEVEC_SIZE 14 +#define PAGEVEC_SIZE 8 struct page; struct address_space; Index: linux-2.6-tip/include/linux/vmstat.h =================================================================== --- linux-2.6-tip.orig/include/linux/vmstat.h +++ linux-2.6-tip/include/linux/vmstat.h @@ -75,7 +75,12 @@ DECLARE_PER_CPU(struct vm_event_state, v static inline void __count_vm_event(enum vm_event_item item) { +#ifdef CONFIG_PREEMPT_RT + get_cpu_var(vm_event_states).event[item]++; + put_cpu(); +#else __get_cpu_var(vm_event_states).event[item]++; +#endif } static inline void count_vm_event(enum vm_event_item item) @@ -86,7 +91,12 @@ static inline void count_vm_event(enum v static inline void __count_vm_events(enum vm_event_item item, long delta) { +#ifdef CONFIG_PREEMPT_RT + get_cpu_var(vm_event_states).event[item] += delta; + put_cpu(); +#else __get_cpu_var(vm_event_states).event[item] += delta; +#endif } static inline void count_vm_events(enum vm_event_item item, long delta) Index: linux-2.6-tip/mm/bounce.c =================================================================== --- linux-2.6-tip.orig/mm/bounce.c +++ linux-2.6-tip/mm/bounce.c @@ -51,11 +51,11 @@ static void bounce_copy_vec(struct bio_v unsigned long flags; unsigned char *vto; - local_irq_save(flags); + local_irq_save_nort(flags); vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ); memcpy(vto + to->bv_offset, vfrom, to->bv_len); kunmap_atomic(vto, KM_BOUNCE_READ); - local_irq_restore(flags); + local_irq_restore_nort(flags); } #else /* CONFIG_HIGHMEM */ Index: linux-2.6-tip/mm/memory.c 
=================================================================== --- linux-2.6-tip.orig/mm/memory.c +++ linux-2.6-tip/mm/memory.c @@ -932,10 +932,13 @@ static unsigned long unmap_page_range(st return addr; } -#ifdef CONFIG_PREEMPT +#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_RT) # define ZAP_BLOCK_SIZE (8 * PAGE_SIZE) #else -/* No preempt: go for improved straight-line efficiency */ +/* + * No preempt: go for improved straight-line efficiency + * on PREEMPT_RT this is not a critical latency-path. + */ # define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE) #endif Index: linux-2.6-tip/mm/mmap.c =================================================================== --- linux-2.6-tip.orig/mm/mmap.c +++ linux-2.6-tip/mm/mmap.c @@ -1961,10 +1961,16 @@ SYSCALL_DEFINE2(munmap, unsigned long, a static inline void verify_mm_writelocked(struct mm_struct *mm) { #ifdef CONFIG_DEBUG_VM - if (unlikely(down_read_trylock(&mm->mmap_sem))) { +# ifdef CONFIG_PREEMPT_RT + if (unlikely(!rt_rwsem_is_locked(&mm->mmap_sem))) { WARN_ON(1); - up_read(&mm->mmap_sem); } +# else + if (unlikely(down_read_trylock(&mm->mmap_sem))) { + WARN_ON(1); + up_read(&mm->mmap_sem); + } +# endif #endif } Index: linux-2.6-tip/mm/vmscan.c =================================================================== --- linux-2.6-tip.orig/mm/vmscan.c +++ linux-2.6-tip/mm/vmscan.c @@ -23,6 +23,7 @@ #include #include #include +#include #include /* for try_to_release_page(), buffer_heads_over_limit */ #include @@ -1125,7 +1126,7 @@ static unsigned long shrink_inactive_lis } nr_reclaimed += nr_freed; - local_irq_disable(); + local_irq_disable_nort(); if (current_is_kswapd()) { __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan); __count_vm_events(KSWAPD_STEAL, nr_freed); @@ -1166,9 +1167,14 @@ static unsigned long shrink_inactive_lis } } } while (nr_scanned < max_scan); + /* + * Non-PREEMPT_RT relies on IRQs-off protecting the page_states + * per-CPU data. PREEMPT_RT has that data protected even in + * __mod_page_state(), so no need to keep IRQs disabled. 
+ */ spin_unlock(&zone->lru_lock); done: - local_irq_enable(); + local_irq_enable_nort(); pagevec_release(&pvec); return nr_reclaimed; } Index: linux-2.6-tip/mm/vmstat.c =================================================================== --- linux-2.6-tip.orig/mm/vmstat.c +++ linux-2.6-tip/mm/vmstat.c @@ -153,10 +153,14 @@ static void refresh_zone_stat_thresholds void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, int delta) { - struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); - s8 *p = pcp->vm_stat_diff + item; + struct per_cpu_pageset *pcp; + int cpu; long x; + s8 *p; + cpu = get_cpu(); + pcp = zone_pcp(zone, cpu); + p = pcp->vm_stat_diff + item; x = delta + *p; if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) { @@ -164,6 +168,7 @@ void __mod_zone_page_state(struct zone * x = 0; } *p = x; + put_cpu(); } EXPORT_SYMBOL(__mod_zone_page_state); @@ -206,9 +211,13 @@ EXPORT_SYMBOL(mod_zone_page_state); */ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) { - struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); - s8 *p = pcp->vm_stat_diff + item; + struct per_cpu_pageset *pcp; + int cpu; + s8 *p; + cpu = get_cpu(); + pcp = zone_pcp(zone, cpu); + p = pcp->vm_stat_diff + item; (*p)++; if (unlikely(*p > pcp->stat_threshold)) { @@ -217,18 +226,34 @@ void __inc_zone_state(struct zone *zone, zone_page_state_add(*p + overstep, zone, item); *p = -overstep; } + put_cpu(); } void __inc_zone_page_state(struct page *page, enum zone_stat_item item) { +#ifdef CONFIG_PREEMPT_RT + unsigned long flags; + struct zone *zone; + + zone = page_zone(page); + local_irq_save(flags); + __inc_zone_state(zone, item); + local_irq_restore(flags); +#else __inc_zone_state(page_zone(page), item); +#endif } EXPORT_SYMBOL(__inc_zone_page_state); void __dec_zone_state(struct zone *zone, enum zone_stat_item item) { - struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); - s8 *p = pcp->vm_stat_diff + item; + struct per_cpu_pageset *pcp; + int cpu; + s8 *p; + + cpu = get_cpu(); + pcp = zone_pcp(zone, cpu); + p = pcp->vm_stat_diff + item; (*p)--; @@ -238,6 +263,7 @@ void __dec_zone_state(struct zone *zone, zone_page_state_add(*p - overstep, zone, item); *p = overstep; } + put_cpu(); } void __dec_zone_page_state(struct page *page, enum zone_stat_item item) patches/preempt-realtime-init-show-enabled-debugs.patch0000664000076400007640000001031111156214137022305 0ustar tglxtglxSubject: preempt: realtime init show enabled debugs From: Ingo Molnar Date: Wed Feb 04 00:02:51 CET 2009 Signed-off-by: Ingo Molnar --- init/main.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) Index: linux-2.6-tip/init/main.c =================================================================== --- linux-2.6-tip.orig/init/main.c +++ linux-2.6-tip/init/main.c @@ -457,6 +457,8 @@ static noinline void __init_refok rest_i { int pid; + system_state = SYSTEM_BOOTING_SCHEDULER_OK; + kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); numa_default_policy(); pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); @@ -697,6 +699,9 @@ asmlinkage void __init start_kernel(void ftrace_init(); +#ifdef CONFIG_PREEMPT_RT + WARN_ON(irqs_disabled()); +#endif /* Do the rest non-__init'ed, we're now alive */ rest_init(); } @@ -786,12 +791,14 @@ static void __init do_basic_setup(void) static void __init do_pre_smp_initcalls(void) { initcall_t *call; + extern int spawn_desched_task(void); /* kmemcheck must initialize before all early 
initcalls: */ kmemcheck_init(); for (call = __initcall_start; call < __early_initcall_end; call++) do_one_initcall(*call); + spawn_desched_task(); } static void run_init_process(char *init_filename) @@ -826,6 +833,9 @@ static noinline int init_post(void) printk(KERN_WARNING "Failed to execute %s\n", ramdisk_execute_command); } +#ifdef CONFIG_PREEMPT_RT + WARN_ON(irqs_disabled()); +#endif /* * We try each of these until one succeeds. @@ -891,7 +901,51 @@ static int __init kernel_init(void * unu ramdisk_execute_command = NULL; prepare_namespace(); } +#ifdef CONFIG_PREEMPT_RT + WARN_ON(irqs_disabled()); +#endif +#define DEBUG_COUNT (defined(CONFIG_DEBUG_RT_MUTEXES) + defined(CONFIG_CRITICAL_PREEMPT_TIMING) + defined(CONFIG_CRITICAL_IRQSOFF_TIMING) + defined(CONFIG_FUNCTION_TRACE) + defined(CONFIG_DEBUG_SLAB) + defined(CONFIG_DEBUG_PAGEALLOC) + defined(CONFIG_LOCKDEP)) + +#if DEBUG_COUNT > 0 + printk(KERN_ERR "*****************************************************************************\n"); + printk(KERN_ERR "* *\n"); +#if DEBUG_COUNT == 1 + printk(KERN_ERR "* REMINDER, the following debugging option is turned on in your .config: *\n"); +#else + printk(KERN_ERR "* REMINDER, the following debugging options are turned on in your .config: *\n"); +#endif + printk(KERN_ERR "* *\n"); +#ifdef CONFIG_DEBUG_RT_MUTEXES + printk(KERN_ERR "* CONFIG_DEBUG_RT_MUTEXES *\n"); +#endif +#ifdef CONFIG_CRITICAL_PREEMPT_TIMING + printk(KERN_ERR "* CONFIG_CRITICAL_PREEMPT_TIMING *\n"); +#endif +#ifdef CONFIG_CRITICAL_IRQSOFF_TIMING + printk(KERN_ERR "* CONFIG_CRITICAL_IRQSOFF_TIMING *\n"); +#endif +#ifdef CONFIG_FUNCTION_TRACE + printk(KERN_ERR "* CONFIG_FUNCTION_TRACE *\n"); +#endif +#ifdef CONFIG_DEBUG_SLAB + printk(KERN_ERR "* CONFIG_DEBUG_SLAB *\n"); +#endif +#ifdef CONFIG_DEBUG_PAGEALLOC + printk(KERN_ERR "* CONFIG_DEBUG_PAGEALLOC *\n"); +#endif +#ifdef CONFIG_LOCKDEP + printk(KERN_ERR "* CONFIG_LOCKDEP *\n"); +#endif + printk(KERN_ERR "* *\n"); +#if DEBUG_COUNT == 1 + printk(KERN_ERR "* it may increase runtime overhead and latencies. *\n"); +#else + printk(KERN_ERR "* they may increase runtime overhead and latencies. *\n"); +#endif + printk(KERN_ERR "* *\n"); + printk(KERN_ERR "*****************************************************************************\n"); +#endif /* * Ok, we have completed the initial bootup, and * we're essentially up and running. 
Get rid of the patches/preempt-realtime-compile-fixes.patch0000664000076400007640000000135711150327144020300 0ustar tglxtglxSubject: preempt: realtime compile fixes From: Ingo Molnar Date: Wed Feb 04 00:02:51 CET 2009 Signed-off-by: Ingo Molnar --- drivers/block/paride/pseudo.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/drivers/block/paride/pseudo.h =================================================================== --- linux-2.6-tip.orig/drivers/block/paride/pseudo.h +++ linux-2.6-tip/drivers/block/paride/pseudo.h @@ -43,7 +43,7 @@ static unsigned long ps_timeout; static int ps_tq_active = 0; static int ps_nice = 0; -static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused))); +static __attribute__((unused)) DEFINE_SPINLOCK(ps_spinlock); static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int); patches/preempt-realtime-console.patch0000664000076400007640000000371411150327144017175 0ustar tglxtglxSubject: preempt: realtime console From: Ingo Molnar Date: Wed Feb 04 00:02:51 CET 2009 Signed-off-by: Ingo Molnar --- drivers/video/console/fbcon.c | 5 +++-- include/linux/console.h | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) Index: linux-2.6-tip/drivers/video/console/fbcon.c =================================================================== --- linux-2.6-tip.orig/drivers/video/console/fbcon.c +++ linux-2.6-tip/drivers/video/console/fbcon.c @@ -1203,7 +1203,6 @@ static void fbcon_clear(struct vc_data * { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops = info->fbcon_par; - struct display *p = &fb_display[vc->vc_num]; u_int y_break; @@ -1235,10 +1234,11 @@ static void fbcon_putcs(struct vc_data * struct display *p = &fb_display[vc->vc_num]; struct fbcon_ops *ops = info->fbcon_par; - if (!fbcon_is_inactive(vc, info)) + if (!fbcon_is_inactive(vc, info)) { ops->putcs(vc, info, s, count, real_y(p, ypos), xpos, get_color(vc, info, scr_readw(s), 1), get_color(vc, info, scr_readw(s), 0)); + } } static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos) @@ -3221,6 +3221,7 @@ static const struct consw fb_con = { .con_screen_pos = fbcon_screen_pos, .con_getxy = fbcon_getxy, .con_resize = fbcon_resize, + .con_preemptible = 1, }; static struct notifier_block fbcon_event_notifier = { Index: linux-2.6-tip/include/linux/console.h =================================================================== --- linux-2.6-tip.orig/include/linux/console.h +++ linux-2.6-tip/include/linux/console.h @@ -55,6 +55,7 @@ struct consw { void (*con_invert_region)(struct vc_data *, u16 *, int); u16 *(*con_screen_pos)(struct vc_data *, int); unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *); + int con_preemptible; // can it reschedule from within printk? 
}; extern const struct consw *conswitchp; patches/preempt-realtime-ide.patch0000664000076400007640000001551511156214137016301 0ustar tglxtglxSubject: preempt: realtime ide From: Ingo Molnar Date: Wed Feb 04 00:02:50 CET 2009 Signed-off-by: Ingo Molnar --- drivers/ide/alim15x3.c | 12 ++++++------ drivers/ide/hpt366.c | 4 ++-- drivers/ide/ide-io.c | 2 +- drivers/ide/ide-iops.c | 20 ++++++++++---------- drivers/ide/ide-probe.c | 6 +++--- drivers/ide/ide-taskfile.c | 6 +++--- 6 files changed, 25 insertions(+), 25 deletions(-) Index: linux-2.6-tip/drivers/ide/alim15x3.c =================================================================== --- linux-2.6-tip.orig/drivers/ide/alim15x3.c +++ linux-2.6-tip/drivers/ide/alim15x3.c @@ -90,7 +90,7 @@ static void ali_set_pio_mode(ide_drive_t if (r_clc >= 16) r_clc = 0; } - local_irq_save(flags); + local_irq_save_nort(flags); /* * PIO mode => ATA FIFO on, ATAPI FIFO off @@ -112,7 +112,7 @@ static void ali_set_pio_mode(ide_drive_t pci_write_config_byte(dev, port, s_clc); pci_write_config_byte(dev, port + unit + 2, (a_clc << 4) | r_clc); - local_irq_restore(flags); + local_irq_restore_nort(flags); } /** @@ -222,7 +222,7 @@ static unsigned int init_chipset_ali15x3 isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); - local_irq_save(flags); + local_irq_save_nort(flags); if (m5229_revision < 0xC2) { /* @@ -313,7 +313,7 @@ out: } pci_dev_put(north); pci_dev_put(isa_dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); return 0; } @@ -375,7 +375,7 @@ static u8 ali_cable_detect(ide_hwif_t *h unsigned long flags; u8 cbl = ATA_CBL_PATA40, tmpbyte; - local_irq_save(flags); + local_irq_save_nort(flags); if (m5229_revision >= 0xC2) { /* @@ -396,7 +396,7 @@ static u8 ali_cable_detect(ide_hwif_t *h } } - local_irq_restore(flags); + local_irq_restore_nort(flags); return cbl; } Index: linux-2.6-tip/drivers/ide/hpt366.c =================================================================== --- linux-2.6-tip.orig/drivers/ide/hpt366.c +++ linux-2.6-tip/drivers/ide/hpt366.c @@ -1328,7 +1328,7 @@ static int __devinit init_dma_hpt366(ide dma_old = inb(base + 2); - local_irq_save(flags); + local_irq_save_nort(flags); dma_new = dma_old; pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); @@ -1339,7 +1339,7 @@ static int __devinit init_dma_hpt366(ide if (dma_new != dma_old) outb(dma_new, base + 2); - local_irq_restore(flags); + local_irq_restore_nort(flags); printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, base, base + 7); Index: linux-2.6-tip/drivers/ide/ide-io.c =================================================================== --- linux-2.6-tip.orig/drivers/ide/ide-io.c +++ linux-2.6-tip/drivers/ide/ide-io.c @@ -949,7 +949,7 @@ void ide_timer_expiry (unsigned long dat /* disable_irq_nosync ?? 
*/ disable_irq(hwif->irq); /* local CPU only, as if we were handling an interrupt */ - local_irq_disable(); + local_irq_disable_nort(); if (hwif->polling) { startstop = handler(drive); } else if (drive_is_ready(drive)) { Index: linux-2.6-tip/drivers/ide/ide-iops.c =================================================================== --- linux-2.6-tip.orig/drivers/ide/ide-iops.c +++ linux-2.6-tip/drivers/ide/ide-iops.c @@ -275,7 +275,7 @@ void ide_input_data(ide_drive_t *drive, unsigned long uninitialized_var(flags); if ((io_32bit & 2) && !mmio) { - local_irq_save(flags); + local_irq_save_nort(flags); ata_vlb_sync(io_ports->nsect_addr); } @@ -285,7 +285,7 @@ void ide_input_data(ide_drive_t *drive, insl(data_addr, buf, len / 4); if ((io_32bit & 2) && !mmio) - local_irq_restore(flags); + local_irq_restore_nort(flags); if ((len & 3) >= 2) { if (mmio) @@ -321,7 +321,7 @@ void ide_output_data(ide_drive_t *drive, unsigned long uninitialized_var(flags); if ((io_32bit & 2) && !mmio) { - local_irq_save(flags); + local_irq_save_nort(flags); ata_vlb_sync(io_ports->nsect_addr); } @@ -331,7 +331,7 @@ void ide_output_data(ide_drive_t *drive, outsl(data_addr, buf, len / 4); if ((io_32bit & 2) && !mmio) - local_irq_restore(flags); + local_irq_restore_nort(flags); if ((len & 3) >= 2) { if (mmio) @@ -509,12 +509,12 @@ static int __ide_wait_stat(ide_drive_t * if ((stat & ATA_BUSY) == 0) break; - local_irq_restore(flags); + local_irq_restore_nort(flags); *rstat = stat; return -EBUSY; } } - local_irq_restore(flags); + local_irq_restore_nort(flags); } /* * Allow status to settle, then read it again. @@ -681,17 +681,17 @@ int ide_driveid_update(ide_drive_t *driv printk("%s: CHECK for good STATUS\n", drive->name); return 0; } - local_irq_save(flags); + local_irq_save_nort(flags); SELECT_MASK(drive, 0); id = kmalloc(SECTOR_SIZE, GFP_ATOMIC); if (!id) { - local_irq_restore(flags); + local_irq_restore_nort(flags); return 0; } tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); (void)tp_ops->read_status(hwif); /* clear drive IRQ */ - local_irq_enable(); - local_irq_restore(flags); + local_irq_enable_nort(); + local_irq_restore_nort(flags); ide_fix_driveid(id); drive->id[ATA_ID_UDMA_MODES] = id[ATA_ID_UDMA_MODES]; Index: linux-2.6-tip/drivers/ide/ide-probe.c =================================================================== --- linux-2.6-tip.orig/drivers/ide/ide-probe.c +++ linux-2.6-tip/drivers/ide/ide-probe.c @@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *dri int bswap = 1; /* local CPU only; some systems need this */ - local_irq_save(flags); + local_irq_save_nort(flags); /* read 512 bytes of id info */ hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); - local_irq_restore(flags); + local_irq_restore_nort(flags); drive->dev_flags |= IDE_DFLAG_ID_READ; #ifdef DEBUG @@ -813,7 +813,7 @@ static int ide_probe_port(ide_hwif_t *hw rc = 0; } - local_irq_restore(flags); + local_irq_restore_nort(flags); /* * Use cached IRQ number. It might be (and is...) 
changed by probe Index: linux-2.6-tip/drivers/ide/ide-taskfile.c =================================================================== --- linux-2.6-tip.orig/drivers/ide/ide-taskfile.c +++ linux-2.6-tip/drivers/ide/ide-taskfile.c @@ -219,7 +219,7 @@ static void ide_pio_sector(ide_drive_t * offset %= PAGE_SIZE; #ifdef CONFIG_HIGHMEM - local_irq_save(flags); + local_irq_save_nort(flags); #endif buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset; @@ -239,7 +239,7 @@ static void ide_pio_sector(ide_drive_t * kunmap_atomic(buf, KM_BIO_SRC_IRQ); #ifdef CONFIG_HIGHMEM - local_irq_restore(flags); + local_irq_restore_nort(flags); #endif } @@ -430,7 +430,7 @@ static ide_startstop_t pre_task_out_intr } if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) - local_irq_disable(); + local_irq_disable_nort(); ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL); ide_pio_datablock(drive, rq, 1); patches/preempt-realtime-input.patch0000664000076400007640000000270711150327144016673 0ustar tglxtglxSubject: preempt: realtime input From: Ingo Molnar Date: Wed Feb 04 00:02:50 CET 2009 Signed-off-by: Ingo Molnar --- drivers/input/gameport/gameport.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) Index: linux-2.6-tip/drivers/input/gameport/gameport.c =================================================================== --- linux-2.6-tip.orig/drivers/input/gameport/gameport.c +++ linux-2.6-tip/drivers/input/gameport/gameport.c @@ -20,6 +20,7 @@ #include #include #include +#include #include /* HZ */ #include #include @@ -98,12 +99,12 @@ static int gameport_measure_speed(struct tx = 1 << 30; for(i = 0; i < 50; i++) { - local_irq_save(flags); + local_irq_save_nort(flags); GET_TIME(t1); for (t = 0; t < 50; t++) gameport_read(gameport); GET_TIME(t2); GET_TIME(t3); - local_irq_restore(flags); + local_irq_restore_nort(flags); udelay(i * 10); if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; } @@ -122,11 +123,11 @@ static int gameport_measure_speed(struct tx = 1 << 30; for(i = 0; i < 50; i++) { - local_irq_save(flags); + local_irq_save_nort(flags); rdtscl(t1); for (t = 0; t < 50; t++) gameport_read(gameport); rdtscl(t2); - local_irq_restore(flags); + local_irq_restore_nort(flags); udelay(i * 10); if (t2 - t1 < tx) tx = t2 - t1; } patches/preempt-realtime-irqs.patch0000664000076400007640000001056211156707241016516 0ustar tglxtglxSubject: preempt: realtime irqs From: Ingo Molnar Date: Wed Feb 04 00:02:49 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/irq.h | 10 ++++------ kernel/irq/handle.c | 7 +++++++ kernel/irq/manage.c | 22 ++++++++++++++++------ kernel/irq/spurious.c | 3 +-- 4 files changed, 28 insertions(+), 14 deletions(-) Index: linux-2.6-tip/include/linux/irq.h =================================================================== --- linux-2.6-tip.orig/include/linux/irq.h +++ linux-2.6-tip/include/linux/irq.h @@ -156,7 +156,6 @@ struct irq_2_iommu; * @irqs_unhandled: stats field for spurious unhandled interrupts * @thread: Thread pointer for threaded preemptible irq handling * @wait_for_handler: Waitqueue to wait for a running preemptible handler - * @cycles: Timestamp for stats and debugging * @lock: locking for SMP * @affinity: IRQ affinity on SMP * @cpu: cpu index useful for balancing @@ -184,10 +183,10 @@ struct irq_desc { unsigned int irq_count; /* For detecting broken IRQs */ unsigned long last_unhandled; /* Aging timer for unhandled count */ unsigned int irqs_unhandled; - struct task_struct *thread; - wait_queue_head_t wait_for_handler; - cycles_t timestamp; - spinlock_t lock; + struct 
task_struct *thread; + wait_queue_head_t wait_for_handler; + cycles_t timestamp; + raw_spinlock_t lock; #ifdef CONFIG_SMP cpumask_var_t affinity; unsigned int cpu; @@ -421,7 +420,6 @@ extern int set_irq_msi(unsigned int irq, /* Early initialization of irqs */ extern void early_init_hardirqs(void); -extern cycles_t irq_timestamp(unsigned int irq); #if defined(CONFIG_PREEMPT_HARDIRQS) extern void init_hardirqs(void); Index: linux-2.6-tip/kernel/irq/handle.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/handle.c +++ linux-2.6-tip/kernel/irq/handle.c @@ -485,6 +485,13 @@ unsigned int __do_IRQ(unsigned int irq) desc->chip->end(irq); return 1; } + /* + * If the task is currently running in user mode, don't + * detect soft lockups. If CONFIG_DETECT_SOFTLOCKUP is not + * configured, this should be optimized out. + */ + if (user_mode(get_irq_regs())) + touch_softlockup_watchdog(); spin_lock(&desc->lock); if (desc->chip->ack) { Index: linux-2.6-tip/kernel/irq/manage.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/manage.c +++ linux-2.6-tip/kernel/irq/manage.c @@ -652,9 +652,9 @@ static struct irqaction *__free_irq(unsi * 'real' IRQ doesn't run in * parallel with our fake. ) */ if (action->flags & IRQF_SHARED) { - local_irq_save(flags); + local_irq_save_nort(flags); action->handler(irq, dev_id); - local_irq_restore(flags); + local_irq_restore_nort(flags); } #endif return action; @@ -791,11 +791,11 @@ int request_irq(unsigned int irq, irq_ha unsigned long flags; disable_irq(irq); - local_irq_save(flags); + local_irq_save_nort(flags); handler(irq, dev_id); - local_irq_restore(flags); + local_irq_restore_nort(flags); enable_irq(irq); } #endif @@ -809,6 +809,11 @@ int hardirq_preemption = 1; EXPORT_SYMBOL(hardirq_preemption); +/* + * Real-Time Preemption depends on hardirq threading: + */ +#ifndef CONFIG_PREEMPT_RT + static int __init hardirq_preempt_setup (char *str) { if (!strncmp(str, "off", 3)) @@ -823,6 +828,7 @@ static int __init hardirq_preempt_setup __setup("hardirq-preempt=", hardirq_preempt_setup); +#endif /* * threaded simple handler @@ -982,12 +988,16 @@ static int do_irqd(void * __desc) sys_sched_setscheduler(current->pid, SCHED_FIFO, ¶m); while (!kthread_should_stop()) { - local_irq_disable(); + local_irq_disable_nort(); set_current_state(TASK_INTERRUPTIBLE); +#ifndef CONFIG_PREEMPT_RT irq_enter(); +#endif do_hardirq(desc); +#ifndef CONFIG_PREEMPT_RT irq_exit(); - local_irq_enable(); +#endif + local_irq_enable_nort(); cond_resched(); #ifdef CONFIG_SMP /* Index: linux-2.6-tip/kernel/irq/spurious.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/spurious.c +++ linux-2.6-tip/kernel/irq/spurious.c @@ -59,9 +59,8 @@ static int try_one_irq(int irq, struct i } action = action->next; } - local_irq_disable(); /* Now clean up the flags */ - spin_lock(&desc->lock); + spin_lock_irq(&desc->lock); action = desc->action; /* patches/irq-desc-init.patch0000664000076400007640000000121611156214136014726 0ustar tglxtglxSubject: irq: desc init From: Ingo Molnar Date: Mon Feb 09 23:43:36 CET 2009 Signed-off-by: Ingo Molnar --- kernel/irq/handle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/kernel/irq/handle.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/handle.c +++ linux-2.6-tip/kernel/irq/handle.c @@ -237,7 +237,7 @@ struct irq_desc irq_desc[NR_IRQS] __cach 
.chip = &no_irq_chip, .handle_irq = handle_bad_irq, .depth = 1, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), + .lock = RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), } }; patches/preempt-realtime-fix-irqdesc-lock-initializers.patch0000664000076400007640000000420411156214136023400 0ustar tglxtglxSubject: rt: sparseirq build fix From: Ingo Molnar Date: Sun Feb 08 16:43:43 CET 2009 Convert it to raw locks: kernel/irq/manage.c: In function ‘thread_simple_irq’: kernel/irq/manage.c:821: error: ‘irq_desc’ undeclared (first use in this function) kernel/irq/manage.c:821: error: (Each undeclared identifier is reported only once kernel/irq/manage.c:821: error: for each function it appears in.) kernel/irq/manage.c: In function ‘thread_level_irq’: kernel/irq/manage.c:843: error: ‘irq_desc’ undeclared (first use in this function) kernel/irq/manage.c: In function ‘thread_fasteoi_irq’: kernel/irq/manage.c:855: error: ‘irq_desc’ undeclared (first use in this function) kernel/irq/manage.c: In function ‘thread_edge_irq’: kernel/irq/manage.c:867: error: ‘irq_desc’ undeclared (first use in this function) kernel/irq/manage.c: In function ‘thread_do_irq’: kernel/irq/manage.c:905: error: ‘irq_desc’ undeclared (first use in this function) kernel/irq/manage.c: In function ‘init_hardirqs’: kernel/irq/manage.c:1031: error: ‘irq_desc’ undeclared (first use in this function) make[1]: *** [kernel/irq/manage.o] Error 1 make: *** [kernel/irq/manage.o] Error 2 => fold back to: preempt-realtime-irqs.patch Signed-off-by: Ingo Molnar --- kernel/irq/handle.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) Index: linux-2.6-tip/kernel/irq/handle.c =================================================================== --- linux-2.6-tip.orig/kernel/irq/handle.c +++ linux-2.6-tip/kernel/irq/handle.c @@ -79,7 +79,7 @@ static struct irq_desc irq_desc_init = { .chip = &no_irq_chip, .handle_irq = handle_bad_irq, .depth = 1, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), + .lock = RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), }; void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) @@ -137,7 +137,7 @@ static struct irq_desc irq_desc_legacy[N .chip = &no_irq_chip, .handle_irq = handle_bad_irq, .depth = 1, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), + .lock = RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), } }; patches/preempt-realtime-fix-sig-cputimer-lock.patch0000664000076400007640000000131111150327144021654 0ustar tglxtglxSubject: rt: signals struct lock init fix From: Ingo Molnar Date: Tue Feb 10 01:25:25 CET 2009 Signed-off-by: Ingo Molnar --- include/linux/init_task.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) Index: linux-2.6-tip/include/linux/init_task.h =================================================================== --- linux-2.6-tip.orig/include/linux/init_task.h +++ linux-2.6-tip/include/linux/init_task.h @@ -52,7 +52,7 @@ extern struct fs_struct init_fs; .cputimer = { \ .cputime = INIT_CPUTIME, \ .running = 0, \ - .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \ + .lock = RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \ }, \ } patches/preempt-realtime-net-drivers.patch0000664000076400007640000000127411150327144017774 0ustar tglxtglxSubject: preempt: realtime net drivers From: Ingo Molnar Date: Wed Feb 04 00:02:49 CET 2009 Signed-off-by: Ingo Molnar --- drivers/net/tulip/tulip_core.c | 1 + 1 file changed, 1 insertion(+) Index: linux-2.6-tip/drivers/net/tulip/tulip_core.c =================================================================== --- linux-2.6-tip.orig/drivers/net/tulip/tulip_core.c +++
linux-2.6-tip/drivers/net/tulip/tulip_core.c @@ -1801,6 +1801,7 @@ static void __devexit tulip_remove_one ( pci_iounmap(pdev, tp->base_addr); free_netdev (dev); pci_release_regions (pdev); + pci_disable_device (pdev); pci_set_drvdata (pdev, NULL); /* pci_power_off (pdev, -1); */ patches/preempt-realtime-printk.patch0000664000076400007640000001205311150327744017044 0ustar tglxtglxSubject: preempt: realtime printk From: Ingo Molnar Date: Wed Feb 04 00:02:49 CET 2009 Signed-off-by: Ingo Molnar --- kernel/printk.c | 63 +++++++++++++++++++++++++++++++++++++++++++++----------- lib/ratelimit.c | 2 - 2 files changed, 52 insertions(+), 13 deletions(-) Index: linux-2.6-tip/kernel/printk.c =================================================================== --- linux-2.6-tip.orig/kernel/printk.c +++ linux-2.6-tip/kernel/printk.c @@ -91,7 +91,7 @@ static int console_locked, console_suspe * It is also used in interesting ways to provide interlocking in * release_console_sem(). */ -static DEFINE_SPINLOCK(logbuf_lock); +static DEFINE_RAW_SPINLOCK(logbuf_lock); #define LOG_BUF_MASK (log_buf_len-1) #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK]) @@ -395,7 +395,7 @@ static void __call_console_drivers(unsig for (con = console_drivers; con; con = con->next) { if ((con->flags & CON_ENABLED) && con->write && - (cpu_online(smp_processor_id()) || + (cpu_online(raw_smp_processor_id()) || (con->flags & CON_ANYTIME))) con->write(con, &LOG_BUF(start), end - start); } @@ -511,6 +511,7 @@ static void zap_locks(void) spin_lock_init(&logbuf_lock); /* And make sure that we print immediately */ init_MUTEX(&console_sem); + zap_rt_locks(); } #if defined(CONFIG_PRINTK_TIME) @@ -592,7 +593,8 @@ static inline int can_use_console(unsign * interrupts disabled. It should return with 'lockbuf_lock' * released but interrupts still disabled. */ -static int acquire_console_semaphore_for_printk(unsigned int cpu) +static int acquire_console_semaphore_for_printk(unsigned int cpu, + unsigned long flags) { int retval = 0; @@ -613,6 +615,8 @@ static int acquire_console_semaphore_for } printk_cpu = UINT_MAX; spin_unlock(&logbuf_lock); + lockdep_on(); + local_irq_restore(flags); return retval; } static const char recursion_bug_msg [] = @@ -634,7 +638,7 @@ asmlinkage int vprintk(const char *fmt, preempt_disable(); /* This stops the holder of console_sem just where we want him */ raw_local_irq_save(flags); - this_cpu = smp_processor_id(); + this_cpu = raw_smp_processor_id(); /* * Ouch, printk recursed into itself! @@ -649,7 +653,8 @@ asmlinkage int vprintk(const char *fmt, */ if (!oops_in_progress) { recursion_bug = 1; - goto out_restore_irqs; + raw_local_irq_restore(flags); + goto out; } zap_locks(); } @@ -657,6 +662,7 @@ asmlinkage int vprintk(const char *fmt, lockdep_off(); spin_lock(&logbuf_lock); printk_cpu = this_cpu; + preempt_enable(); if (recursion_bug) { recursion_bug = 0; @@ -726,14 +732,10 @@ asmlinkage int vprintk(const char *fmt, * will release 'logbuf_lock' regardless of whether it * actually gets the semaphore or not. 
*/ - if (acquire_console_semaphore_for_printk(this_cpu)) + if (acquire_console_semaphore_for_printk(this_cpu, flags)) release_console_sem(); - lockdep_on(); -out_restore_irqs: - raw_local_irq_restore(flags); - - preempt_enable(); +out: return printed_len; } EXPORT_SYMBOL(printk); @@ -996,15 +998,35 @@ void release_console_sem(void) _con_start = con_start; _log_end = log_end; con_start = log_end; /* Flush */ + /* + * on PREEMPT_RT, call console drivers with + * interrupts enabled (if printk was called + * with interrupts disabled): + */ +#ifdef CONFIG_PREEMPT_RT + spin_unlock_irqrestore(&logbuf_lock, flags); +#else spin_unlock(&logbuf_lock); stop_critical_timings(); /* don't trace print latency */ +#endif call_console_drivers(_con_start, _log_end); start_critical_timings(); +#ifndef CONFIG_PREEMPT_RT local_irq_restore(flags); +#endif } console_locked = 0; - up(&console_sem); spin_unlock_irqrestore(&logbuf_lock, flags); + up(&console_sem); + /* + * On PREEMPT_RT kernels __wake_up may sleep, so wake syslogd + * up only if we are in a preemptible section. We normally dont + * printk from non-preemptible sections so this is for the emergency + * case only. + */ +#ifdef CONFIG_PREEMPT_RT + if (!in_atomic() && !irqs_disabled()) +#endif if (wake_klogd) wake_up_klogd(); } @@ -1280,6 +1302,23 @@ int printk_ratelimit(void) } EXPORT_SYMBOL(printk_ratelimit); +static DEFINE_RAW_SPINLOCK(warn_lock); + +void __WARN_ON(const char *func, const char *file, const int line) +{ + unsigned long flags; + + spin_lock_irqsave(&warn_lock, flags); + printk("%s/%d[CPU#%d]: BUG in %s at %s:%d\n", + current->comm, current->pid, raw_smp_processor_id(), + func, file, line); + dump_stack(); + spin_unlock_irqrestore(&warn_lock, flags); +} + +EXPORT_SYMBOL(__WARN_ON); + + /** * printk_timed_ratelimit - caller-controlled printk ratelimiting * @caller_jiffies: pointer to caller's state Index: linux-2.6-tip/lib/ratelimit.c =================================================================== --- linux-2.6-tip.orig/lib/ratelimit.c +++ linux-2.6-tip/lib/ratelimit.c @@ -14,7 +14,7 @@ #include #include -static DEFINE_SPINLOCK(ratelimit_lock); +static DEFINE_RAW_SPINLOCK(ratelimit_lock); /* * __ratelimit - rate limiting patches/preempt-realtime-rawlocks.patch0000664000076400007640000001273211156214136017362 0ustar tglxtglxSubject: preempt: realtime rawlocks From: Ingo Molnar Date: Wed Feb 04 00:02:48 CET 2009 Signed-off-by: Ingo Molnar --- drivers/oprofile/oprofilefs.c | 2 +- drivers/pci/access.c | 2 +- drivers/video/console/vgacon.c | 2 +- include/linux/kprobes.h | 2 +- include/linux/oprofile.h | 2 +- include/linux/percpu_counter.h | 2 +- kernel/kprobes.c | 12 ++++++------ kernel/softlockup.c | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) Index: linux-2.6-tip/drivers/oprofile/oprofilefs.c =================================================================== --- linux-2.6-tip.orig/drivers/oprofile/oprofilefs.c +++ linux-2.6-tip/drivers/oprofile/oprofilefs.c @@ -21,7 +21,7 @@ #define OPROFILEFS_MAGIC 0x6f70726f -DEFINE_SPINLOCK(oprofilefs_lock); +DEFINE_RAW_SPINLOCK(oprofilefs_lock); static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode) { Index: linux-2.6-tip/drivers/pci/access.c =================================================================== --- linux-2.6-tip.orig/drivers/pci/access.c +++ linux-2.6-tip/drivers/pci/access.c @@ -12,7 +12,7 @@ * configuration space. */ -static DEFINE_SPINLOCK(pci_lock); +static DEFINE_RAW_SPINLOCK(pci_lock); /* * Wrappers for all PCI configuration access functions. 
They just check Index: linux-2.6-tip/drivers/video/console/vgacon.c =================================================================== --- linux-2.6-tip.orig/drivers/video/console/vgacon.c +++ linux-2.6-tip/drivers/video/console/vgacon.c @@ -51,7 +51,7 @@ #include